hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
316c411af1d1bd5a4f2fe573457c399a1322caf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper triangular matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//setting matrices to upper triangular form
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrices (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m) {
for(int i = 0; i < n; i++)
sum += a[row * n + i] * b[i * k + col];
c[row * k + col] = sum;
}
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrices (not only square) on GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICIENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
int main(int argc, char** argv) {
int printAllMat = 1; // debug flag for printing all of the matrices
// Set sizes of the matrices
int m=15;
int n=15;
int k=15;
/* Fixed seed for illustration */
srand(3333);
// Allocate memory in host RAM
float *copyA, *copyB, *copyC;
hipHostMalloc((void **) &copyA, sizeof(float)*m*n); // copied matrix is m x n
hipHostMalloc((void **) &copyB, sizeof(float)*n*k); // copied matrix is n x k
hipHostMalloc((void **) &copyC, sizeof(float)*m*k); // copied matrix is m x k
// float x = (float)rand()/(float)(RAND_MAX/a);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// Allocate memory space on the device
float *matA, *matB, *matC;
hipMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
hipMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
hipMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
// copy matrix A and B from host to device memory
hipMemcpy(matA, copyA, sizeof(float)*m*n, hipMemcpyHostToDevice);
hipMemcpy(matB, copyB, sizeof(float)*n*k, hipMemcpyHostToDevice);
printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
printf("Calculating...\n\n");
// Launch kernel, check if it is a square
if(m == n && n == k) {
hipLaunchKernelGGL(( matrixTriUpper), dim3(dimGrid), dim3(dimBlock), 0, 0, matA, m, n);
hipLaunchKernelGGL(( matrixTriUpper), dim3(dimGrid), dim3(dimBlock), 0, 0, matB, n, k);
hipLaunchKernelGGL(( squareMatrixMult), dim3(dimGrid), dim3(dimBlock), 0, 0, matA, matB, matC, n); // square, thus only need 1 param to define size
}
else { // not a square, thus it needs param to define all sizes
hipLaunchKernelGGL(( matrixMult), dim3(dimGrid), dim3(dimBlock), 0, 0, matA, matB, matC, m, n, k);
}
// Transfer results from device to host
hipMemcpy(copyC, matC, sizeof(float)*m*k, hipMemcpyDeviceToHost);
hipDeviceSynchronize(); //possibly
//hipDeviceSynchronize();
//prints the matrices
// printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]); //Another possible way to print the matrix
//if the debug flag is on, it will print the two input matrices as well
int i,j;
if(printAllMat == 1) {
// print matrix A
printf("matA matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
//printf("[%d][%d]:%d, ", i, j, copyA[i*k + j]);
printf(" %f ", copyA[i*k + j]);
}
printf("\n");
}
// print matrix B
printf("\nmatB matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyB[i*k + j]);
printf(" %f ", copyB[i*k + j]);
}
printf("\n");
}
}
// print result matrix
printf("\nResult matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]);
printf(" %f ", copyC[i*k + j]);
}
printf("\n");
}
// free memory
hipFree(matA);
hipFree(matB);
hipFree(matC);
hipHostFree(copyA);
hipHostFree(copyB);
hipHostFree(copyC);
return 0;
}
| 316c411af1d1bd5a4f2fe573457c399a1322caf8.cu | /*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper triangular matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//setting matrices to upper triangular form
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrices (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m) {
for(int i = 0; i < n; i++)
sum += a[row * n + i] * b[i * k + col];
c[row * k + col] = sum;
}
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrices (not only square) on GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICIENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
int main(int argc, char** argv) {
int printAllMat = 1; // debug flag for printing all of the matrices
// Set sizes of the matrices
int m=15;
int n=15;
int k=15;
/* Fixed seed for illustration */
srand(3333);
// Allocate memory in host RAM
float *copyA, *copyB, *copyC;
cudaMallocHost((void **) &copyA, sizeof(float)*m*n); // copied matrix is m x n
cudaMallocHost((void **) &copyB, sizeof(float)*n*k); // copied matrix is n x k
cudaMallocHost((void **) &copyC, sizeof(float)*m*k); // copied matrix is m x k
// float x = (float)rand()/(float)(RAND_MAX/a);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// Allocate memory space on the device
float *matA, *matB, *matC;
cudaMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
cudaMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
cudaMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
// copy matrix A and B from host to device memory
cudaMemcpy(matA, copyA, sizeof(float)*m*n, cudaMemcpyHostToDevice);
cudaMemcpy(matB, copyB, sizeof(float)*n*k, cudaMemcpyHostToDevice);
printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
printf("Calculating...\n\n");
// Launch kernel, check if it is a square
if(m == n && n == k) {
matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
}
else { // not a square, thus it needs param to define all sizes
matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
}
// Transfer results from device to host
cudaMemcpy(copyC, matC, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize(); //possibly
//cudaThreadSynchronize();
//prints the matrices
// printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]); //Another possible way to print the matrix
//if the debug flag is on, it will print the two input matrices as well
int i,j;
if(printAllMat == 1) {
// print matrix A
printf("matA matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
//printf("[%d][%d]:%d, ", i, j, copyA[i*k + j]);
printf(" %f ", copyA[i*k + j]);
}
printf("\n");
}
// print matrix B
printf("\nmatB matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyB[i*k + j]);
printf(" %f ", copyB[i*k + j]);
}
printf("\n");
}
}
// print result matrix
printf("\nResult matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]);
printf(" %f ", copyC[i*k + j]);
}
printf("\n");
}
// free memory
cudaFree(matA);
cudaFree(matB);
cudaFree(matC);
cudaFreeHost(copyA);
cudaFreeHost(copyB);
cudaFreeHost(copyC);
return 0;
}
|
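
The row above shows the mechanical API mapping hipify applies to this file: `cudaMallocHost` becomes `hipHostMalloc`, `cudaFreeHost` becomes `hipHostFree`, `cudaMemcpy` keeps its shape with `hip`-prefixed enums, and the `<<<grid, block>>>` launch becomes `hipLaunchKernelGGL`. Below is a minimal sketch of the same translation on a toy kernel; the kernel name `scaleKernel` and the sizes are illustrative, not taken from the dataset.

```cpp
#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void scaleKernel(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;                          // identical body in CUDA and HIP
}

int main() {
    const int n = 256;
    float *h = nullptr, *d = nullptr;
    hipHostMalloc((void**)&h, n * sizeof(float));  // CUDA: cudaMallocHost
    for (int i = 0; i < n; ++i) h[i] = 1.0f;
    hipMalloc((void**)&d, n * sizeof(float));      // CUDA: cudaMalloc
    hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
    // CUDA: scaleKernel<<<dim3(1), dim3(256)>>>(d, 2.0f, n);
    hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(256), 0, 0, d, 2.0f, n);
    hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
    printf("h[0] = %f\n", h[0]);                   // expect 2.0
    hipFree(d);                                    // CUDA: cudaFree
    hipHostFree(h);                                // CUDA: cudaFreeHost
    return 0;
}
```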
77f0da584ad6b3a0b3456fce8c038813fc400153.hip | // !!! This is a file automatically generated by hipify!!!
// http://csweb.cs.wfu.edu/bigiron/LittleFE-CUDA-TrapezoidalRule/build/html/cudaAlg.html
// This program implements trapezoidal integration for a function
// f(x) over the interval [c,d] using N subdivisions. This program
// runs on a host and device (NVIDIA graphics chip with cuda
// certification). The function f(x) is implemented as a callable
// function on the device. The kernel computes the sums f(xi)+f(xi+deltaX).
// The host function sums the individual terms computed on the
// device and multiplies by deltaX/2.
#include <iostream>
#include <ctime>
using namespace std;
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include <hip/hip_runtime.h>
// function to integrate, defined as a function on the
// GPU device
__device__ float myfunction(float a)
{
return a*a+2.0f*a + 3.0f;
}
// kernel function to compute the summation used in the trapezoidal
// rule for numerical integration
// __global__ __device__ void integratorKernel(float *a, float c, float deltaX, int N)
__global__ void integratorKernel(float *a, float c, float deltaX, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = c + (float)idx * deltaX;
if (idx<N)
{
a[idx] = myfunction(x)+myfunction(x+deltaX);
}
}
// cudaIntegrate() is the host function that sets up the
// computation of the integral of f(x) over the interval
// [c,d].
__host__ float cudaIntegrate(float c, float d, int N)
{
// deltaX
float deltaX = (d-c)/N;
// error code variable
hipError_t errorcode = hipSuccess;
// size of the arrays in bytes
int size = N*sizeof(float);
// allocate array on host and device
float* a_h = (float *)malloc(size);
float* a_d;
if (( errorcode = hipMalloc((void **)&a_d,size))!= hipSuccess)
{
cout << "hipMalloc(): " << hipGetErrorString(errorcode) << endl;
exit(1);
}
// do calculation on device
int block_size = 256;
int n_blocks = N/block_size + ( N % block_size == 0 ? 0:1);
// cout << "blocks: " << n_blocks << endl;
// cout << "block size: " << block_size << endl;
hipLaunchKernelGGL(( integratorKernel) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, c, deltaX, N);
// copy results from device to host
if((errorcode = hipMemcpy(a_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost))!=hipSuccess)
{
cout << "hipMemcpy(): " << hipGetErrorString(errorcode) << endl;
exit(1);
}
// add up results
float sum = 0.0;
for(int i=0; i<N; i++) sum += a_h[i];
sum *= deltaX/2.0;
// clean up
free(a_h);
hipFree(a_d);
return sum;
}
// utility host function to convert the length of time into
// milliseconds
__host__ double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1-clock2;
double diffms = diffticks/(CLOCKS_PER_SEC/1000);
return diffms;
}
// host main program
int main()
{
clock_t start = clock();
float answer = cudaIntegrate(0.0,1.0,1000);
clock_t end = clock();
cout << "The answer is " << answer << endl;
cout << "Computation time: " << diffclock(end,start);
cout << " micro seconds" << endl;
return 0;
} | 77f0da584ad6b3a0b3456fce8c038813fc400153.cu | // http://csweb.cs.wfu.edu/bigiron/LittleFE-CUDA-TrapezoidalRule/build/html/cudaAlg.html
// This program implements trapezoidal integration for a function
// f(x) over the interval [c,d] using N subdivisions. This program
// runs on a host and device (NVIDIA graphics chip with cuda
// certification). The function f(x) is implemented as a callable
// function on the device. The kernel computes the sums f(xi)+f(xi+deltaX).
// The host function sums the individual terms computed on the
// device and multiplies by deltaX/2.
#include <iostream>
#include <ctime>
using namespace std;
#include <cuda.h>
#include <math_constants.h>
#include <cuda_runtime.h>
// function to integrate, defined as a function on the
// GPU device
__device__ float myfunction(float a)
{
return a*a+2.0f*a + 3.0f;
}
// kernel function to compute the summation used in the trapezoidal
// rule for numerical integration
// __global__ __device__ void integratorKernel(float *a, float c, float deltaX, int N)
__global__ void integratorKernel(float *a, float c, float deltaX, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = c + (float)idx * deltaX;
if (idx<N)
{
a[idx] = myfunction(x)+myfunction(x+deltaX);
}
}
// cudaIntegrate() is the host function that sets up the
// computation of the integral of f(x) over the interval
// [c,d].
__host__ float cudaIntegrate(float c, float d, int N)
{
// deltaX
float deltaX = (d-c)/N;
// error code variable
cudaError_t errorcode = cudaSuccess;
// size of the arrays in bytes
int size = N*sizeof(float);
// allocate array on host and device
float* a_h = (float *)malloc(size);
float* a_d;
if (( errorcode = cudaMalloc((void **)&a_d,size))!= cudaSuccess)
{
cout << "cudaMalloc(): " << cudaGetErrorString(errorcode) << endl;
exit(1);
}
// do calculation on device
int block_size = 256;
int n_blocks = N/block_size + ( N % block_size == 0 ? 0:1);
// cout << "blocks: " << n_blocks << endl;
// cout << "block size: " << block_size << endl;
integratorKernel <<< n_blocks, block_size >>> (a_d, c, deltaX, N);
// copy results from device to host
if((errorcode = cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost))!=cudaSuccess)
{
cout << "cudaMemcpy(): " << cudaGetErrorString(errorcode) << endl;
exit(1);
}
// add up results
float sum = 0.0;
for(int i=0; i<N; i++) sum += a_h[i];
sum *= deltaX/2.0;
// clean up
free(a_h);
cudaFree(a_d);
return sum;
}
// utility host function to convert the length of time into
// milliseconds
__host__ double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1-clock2;
double diffms = diffticks/(CLOCKS_PER_SEC/1000);
return diffms;
}
// host main program
int main()
{
clock_t start = clock();
float answer = cudaIntegrate(0.0,1.0,1000);
clock_t end = clock();
cout << "The answer is " << answer << endl;
cout << "Computation time: " << diffclock(end,start);
cout << " micro seconds" << endl;
return 0;
} |
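
The kernel in the row above only evaluates the per-interval terms f(xi) + f(xi+deltaX); the host then sums them and scales by deltaX/2. A plain CPU version of the same composite trapezoidal rule is a convenient reference for checking the device result (a hedged sketch; `cpuIntegrate` is not part of the dataset). For f(x) = x^2 + 2x + 3 on [0,1] the exact integral is 1/3 + 1 + 3 = 13/3 ≈ 4.3333.

```cpp
#include <iostream>

// f(x) = x^2 + 2x + 3, the same integrand the device code uses
static float myfunction(float a) { return a*a + 2.0f*a + 3.0f; }

// Composite trapezoidal rule over [c,d] with N subdivisions:
// deltaX/2 * sum_i ( f(x_i) + f(x_i + deltaX) ), where x_i = c + i*deltaX
float cpuIntegrate(float c, float d, int N)
{
    float deltaX = (d - c) / N;
    float sum = 0.0f;
    for (int i = 0; i < N; ++i) {
        float x = c + i * deltaX;
        sum += myfunction(x) + myfunction(x + deltaX);
    }
    return sum * deltaX / 2.0f;
}

int main()
{
    // Matches cudaIntegrate(0.0, 1.0, 1000); exact answer is 13/3 = 4.3333...
    std::cout << "CPU reference: " << cpuIntegrate(0.0f, 1.0f, 1000) << std::endl;
    return 0;
}
```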
23130f61080bd2a8d8fa3e84c7d92911b8dda823.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Ahmad Abdelfattah
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_z
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
// =============================================================================
// init kernel
__global__ void
zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
template<int TX, int NPAGES>
__global__ void
zgetf2_native_kernel( int m, int n,
magmaDoubleComplex_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
#ifdef HAVE_CUBLAS
const int tx = threadIdx.x;
const int bx = blockIdx.x;
magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO};
magmaDoubleComplex rx, rx_max;
magmaDoubleComplex_ptr da = dA;
int rx_id, max_id, flag = 0, linfo;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
// read the info (if it was set to non-zero by a previous panel, then we don't set it again)
linfo = (int)(*info);
__shared__ magmaDoubleComplex sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ magmaDoubleComplex sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// izamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_Z_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = (rx_abs_max == MAGMA_D_ZERO) ? i : max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
linfo = (rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (max_id+gbstep+1) : linfo;
if( tx == 0){
//printf("[%2d]: bx = %d, max_id, = %d, rx_abs_max = %f, linfo = %d\n", i, bx, max_id, rx_abs_max, linfo);
magmablas_iatomic_exchange((magma_int_t*)info, (magma_int_t)(linfo) );
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
//if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
linfo = (*info);
__syncthreads();
//if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
magmaDoubleComplex tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
magmaDoubleComplex reg = (rx_max == MAGMA_Z_ZERO) ? MAGMA_Z_ONE : MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
magmaDoubleComplex reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
#endif // HAVE_CUBLAS
}
// =============================================================================
extern "C" magma_int_t
magma_zgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = ZGETF2_FUSED_NTH;
if( m < n || m > ZGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
size_t max_n_npages = max(n,npages);
hipLaunchKernelGGL(( zgetf2_native_init_kernel), dim3(1), dim3(max_n_npages), 0, queue->cuda_stream() , n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 33>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 34>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 35>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 36>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 37>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 38>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 39>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 40>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 41>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 42>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 43>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 44>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 45>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 46>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 47:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 47>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 48>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 49>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 50>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 51:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 51>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 52>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 53>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 54>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 55>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 56>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 57>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 58>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 59>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 60>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 61>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 62>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 63>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 64>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 65>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 66>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 67>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 68>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 69>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 70>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 71>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 72>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 73>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 74>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 75>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 76>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 77>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 78>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 79>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 80>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
default: printf("size not supported \n");
}
return 0;
}
| 23130f61080bd2a8d8fa3e84c7d92911b8dda823.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Ahmad Abdelfattah
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_z
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
// =============================================================================
// init kernel
__global__ void
zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
template<int TX, int NPAGES>
__global__ void
zgetf2_native_kernel( int m, int n,
magmaDoubleComplex_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
#ifdef HAVE_CUBLAS
const int tx = threadIdx.x;
const int bx = blockIdx.x;
magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO};
magmaDoubleComplex rx, rx_max;
magmaDoubleComplex_ptr da = dA;
int rx_id, max_id, flag = 0, linfo;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
// read the info (if it was set to non-zero by a previous panel, then we don't set it again)
linfo = (int)(*info);
__shared__ magmaDoubleComplex sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ magmaDoubleComplex sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// izamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_Z_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = (rx_abs_max == MAGMA_D_ZERO) ? i : max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
linfo = (rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (max_id+gbstep+1) : linfo;
if( tx == 0){
//printf("[%2d]: bx = %d, max_id, = %d, rx_abs_max = %f, linfo = %d\n", i, bx, max_id, rx_abs_max, linfo);
magmablas_iatomic_exchange((magma_int_t*)info, (magma_int_t)(linfo) );
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
//if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
linfo = (*info);
__syncthreads();
//if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
magmaDoubleComplex tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
magmaDoubleComplex reg = (rx_max == MAGMA_Z_ZERO) ? MAGMA_Z_ONE : MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
magmaDoubleComplex reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
#endif // HAVE_CUBLAS
}
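// ----------------------------------------------------------------------------
// A hypothetical sketch (not part of MAGMA) of the inter-block handshake the
// kernel above relies on: the producing block publishes its column, issues
// __threadfence() so the stores are visible device-wide, then raises its
// update_flag entry with an atomic exchange, while later blocks spin on the
// volatile flag before consuming the column. Kept as a comment so it does not
// change this file; the names demo_flag_handshake, flag, and data are
// illustrative only.
//
// __global__ void demo_flag_handshake(volatile int *flag, int *data)
// {
//     if (blockIdx.x == 0) {                 // producer block
//         if (threadIdx.x == 0) {
//             data[0] = 42;                  // publish the payload
//             __threadfence();               // make the store visible to all blocks
//             atomicExch((int *)flag, 1);    // then raise the flag
//         }
//     }
//     else {                                 // consumer blocks
//         if (threadIdx.x == 0) {
//             while (*flag == 0) { }         // spin until the producer is done
//             int v = data[0];               // safe to read once the flag flips
//             (void)v;
//         }
//     }
// }
// ----------------------------------------------------------------------------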
// =============================================================================
extern "C" magma_int_t
magma_zgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = ZGETF2_FUSED_NTH;
if( m < n || m > ZGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
size_t max_n_npages = max(n,npages);
zgetf2_native_init_kernel<<< 1, max_n_npages, 0, queue->cuda_stream() >>>( n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1: zgetf2_native_kernel< ntx, 1><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2: zgetf2_native_kernel< ntx, 2><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3: zgetf2_native_kernel< ntx, 3><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4: zgetf2_native_kernel< ntx, 4><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5: zgetf2_native_kernel< ntx, 5><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6: zgetf2_native_kernel< ntx, 6><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7: zgetf2_native_kernel< ntx, 7><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8: zgetf2_native_kernel< ntx, 8><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9: zgetf2_native_kernel< ntx, 9><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10: zgetf2_native_kernel< ntx, 10><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11: zgetf2_native_kernel< ntx, 11><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12: zgetf2_native_kernel< ntx, 12><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13: zgetf2_native_kernel< ntx, 13><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14: zgetf2_native_kernel< ntx, 14><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15: zgetf2_native_kernel< ntx, 15><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16: zgetf2_native_kernel< ntx, 16><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17: zgetf2_native_kernel< ntx, 17><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18: zgetf2_native_kernel< ntx, 18><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19: zgetf2_native_kernel< ntx, 19><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20: zgetf2_native_kernel< ntx, 20><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21: zgetf2_native_kernel< ntx, 21><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22: zgetf2_native_kernel< ntx, 22><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23: zgetf2_native_kernel< ntx, 23><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24: zgetf2_native_kernel< ntx, 24><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25: zgetf2_native_kernel< ntx, 25><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26: zgetf2_native_kernel< ntx, 26><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27: zgetf2_native_kernel< ntx, 27><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28: zgetf2_native_kernel< ntx, 28><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29: zgetf2_native_kernel< ntx, 29><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30: zgetf2_native_kernel< ntx, 30><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31: zgetf2_native_kernel< ntx, 31><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32: zgetf2_native_kernel< ntx, 32><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33: zgetf2_native_kernel< ntx, 33><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34: zgetf2_native_kernel< ntx, 34><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35: zgetf2_native_kernel< ntx, 35><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36: zgetf2_native_kernel< ntx, 36><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37: zgetf2_native_kernel< ntx, 37><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38: zgetf2_native_kernel< ntx, 38><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39: zgetf2_native_kernel< ntx, 39><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40: zgetf2_native_kernel< ntx, 40><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41: zgetf2_native_kernel< ntx, 41><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42: zgetf2_native_kernel< ntx, 42><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43: zgetf2_native_kernel< ntx, 43><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44: zgetf2_native_kernel< ntx, 44><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45: zgetf2_native_kernel< ntx, 45><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46: zgetf2_native_kernel< ntx, 46><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 47: zgetf2_native_kernel< ntx, 47><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48: zgetf2_native_kernel< ntx, 48><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49: zgetf2_native_kernel< ntx, 49><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50: zgetf2_native_kernel< ntx, 50><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 51: zgetf2_native_kernel< ntx, 51><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52: zgetf2_native_kernel< ntx, 52><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53: zgetf2_native_kernel< ntx, 53><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54: zgetf2_native_kernel< ntx, 54><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55: zgetf2_native_kernel< ntx, 55><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56: zgetf2_native_kernel< ntx, 56><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57: zgetf2_native_kernel< ntx, 57><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58: zgetf2_native_kernel< ntx, 58><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59: zgetf2_native_kernel< ntx, 59><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60: zgetf2_native_kernel< ntx, 60><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61: zgetf2_native_kernel< ntx, 61><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62: zgetf2_native_kernel< ntx, 62><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63: zgetf2_native_kernel< ntx, 63><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64: zgetf2_native_kernel< ntx, 64><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65: zgetf2_native_kernel< ntx, 65><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66: zgetf2_native_kernel< ntx, 66><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67: zgetf2_native_kernel< ntx, 67><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68: zgetf2_native_kernel< ntx, 68><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69: zgetf2_native_kernel< ntx, 69><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70: zgetf2_native_kernel< ntx, 70><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71: zgetf2_native_kernel< ntx, 71><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72: zgetf2_native_kernel< ntx, 72><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73: zgetf2_native_kernel< ntx, 73><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74: zgetf2_native_kernel< ntx, 74><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75: zgetf2_native_kernel< ntx, 75><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76: zgetf2_native_kernel< ntx, 76><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77: zgetf2_native_kernel< ntx, 77><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78: zgetf2_native_kernel< ntx, 78><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79: zgetf2_native_kernel< ntx, 79><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80: zgetf2_native_kernel< ntx, 80><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
default: printf("size not supported \n");
}
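// The switch above dispatches on the panel width as a template parameter, presumably so
// the kernel sees it as a compile-time constant (fixed-size register arrays, unrolled
// loops); widths beyond the per-precision limits fall through to the default case.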
return 0;
}
|
3fa4ede10dd78785fbcac966b7dd2d2b17934389.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#define N 512
void host_add(int *a, int *b, int *c) {
for(int idx=0;idx<N;idx++)
c[idx] = a[idx] + b[idx];
}
__global__ void device_add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
// Fills each element of the array with its own index.
void fill_array(int *data) {
for(int idx=0;idx<N;idx++)
data[idx] = idx;
}
void print_output(int *a, int *b, int*c) {
for(int idx=0;idx<N;idx++)
printf("\n %d + %d = %d", a[idx] , b[idx], c[idx]);
}
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c; // device copies of a, b, c
int threads_per_block=0, no_of_blocks=0;
int size = N * sizeof(int);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); fill_array(a);
b = (int *)malloc(size); fill_array(b);
c = (int *)malloc(size);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
threads_per_block = 4;
no_of_blocks = N/threads_per_block;
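// This integer division is exact here because N (512) is a multiple of
// threads_per_block (4); for a general N one would round up instead, e.g.
// no_of_blocks = (N + threads_per_block - 1) / threads_per_block; together with an
// "index < N" guard in the kernel (not part of the original file).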
hipLaunchKernelGGL(( device_add), dim3(no_of_blocks),dim3(threads_per_block), 0, 0, d_a,d_b,d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
print_output(a,b,c);
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | 3fa4ede10dd78785fbcac966b7dd2d2b17934389.cu | #include<stdio.h>
#include<stdlib.h>
#define N 512
void host_add(int *a, int *b, int *c) {
for(int idx=0;idx<N;idx++)
c[idx] = a[idx] + b[idx];
}
__global__ void device_add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
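// Illustrative sketch, not in the original file: a bounds-checked variant for the
// general case where N is not an exact multiple of the block size. The name
// device_add_guarded and the extra length parameter n are hypothetical additions.
__global__ void device_add_guarded(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) // guard against threads past the end of the arrays
c[index] = a[index] + b[index];
}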
// Fills each element of the array with its own index.
void fill_array(int *data) {
for(int idx=0;idx<N;idx++)
data[idx] = idx;
}
void print_output(int *a, int *b, int*c) {
for(int idx=0;idx<N;idx++)
printf("\n %d + %d = %d", a[idx] , b[idx], c[idx]);
}
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c; // device copies of a, b, c
int threads_per_block=0, no_of_blocks=0;
int size = N * sizeof(int);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); fill_array(a);
b = (int *)malloc(size); fill_array(b);
c = (int *)malloc(size);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
threads_per_block = 4;
no_of_blocks = N/threads_per_block;
device_add<<<no_of_blocks,threads_per_block>>>(d_a,d_b,d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
print_output(a,b,c);
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
671d6e6abbf401437321ccb7dde20a3d94638539.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//==================================================
// bvh.cu
//
// Copyright (c) 2016 Benjamin Hepp.
// Author: Benjamin Hepp
// Created on: Jan 16, 2017
//==================================================
#include <bh/cuda_utils.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <iostream>
#include <bh/utilities.h>
#include "bvh.cuh"
#include <bh/cuda_utils.h>
#include <stdlib.h>
#include <assert.h>
using std::printf;
namespace bvh {
template <typename FloatT>
CudaTree<FloatT>* CudaTree<FloatT>::createCopyFromHostTree(
NodeType* root,
const std::size_t num_of_nodes,
const std::size_t tree_depth) {
CudaTree* cuda_tree = new CudaTree(tree_depth);
const std::size_t memory_size = sizeof(NodeType) * num_of_nodes;
std::cout << "Allocating " << (memory_size / 1024. / 1024.) << " MB of GPU memory" << std::endl;
cuda_tree->d_nodes_ = bh::CudaUtils::template allocate<NodeType>(num_of_nodes);
std::deque<NodeType*> node_queue;
node_queue.push_front(root);
std::size_t node_counter = 0;
std::size_t copied_node_counter = 0;
const std::size_t report_threshold = num_of_nodes / 20;
std::size_t report_counter = 0;
const std::size_t node_cache_size = num_of_nodes / 20;
std::vector<CudaNode<FloatT>> node_cache;
node_cache.reserve(node_cache_size);
while (!node_queue.empty()) {
NodeType* node = node_queue.back();
node_queue.pop_back();
CudaNode<FloatT> cuda_node;
cuda_node.bounding_box_ = node->bounding_box_;
cuda_node.ptr_ = static_cast<void*>(node);
BH_ASSERT(cuda_node.ptr_ != nullptr);
if (node->hasLeftChild()) {
node_queue.push_front(node->getLeftChild());
const std::size_t left_child_index = node_counter + node_queue.size();
cuda_node.left_child_ = &cuda_tree->d_nodes_[left_child_index];
}
else {
cuda_node.left_child_ = nullptr;
}
if (node->hasRightChild()) {
node_queue.push_front(node->getRightChild());
std::size_t right_child_index = node_counter + node_queue.size();
cuda_node.right_child_ = &cuda_tree->d_nodes_[right_child_index];
}
else {
cuda_node.right_child_ = nullptr;
}
// bh::CudaUtils::copyToDevice(cuda_node, &tree->d_nodes_[node_counter]);
node_cache.push_back(cuda_node);
if (node_cache.size() == node_cache_size) {
bh::CudaUtils::copyArrayToDevice(node_cache, &cuda_tree->d_nodes_[copied_node_counter]);
copied_node_counter += node_cache.size();
node_cache.clear();
}
++node_counter;
++report_counter;
if (report_counter >= report_threshold) {
std::cout << "Copied " << node_counter << " nodes [" << (100 * node_counter / (FloatT)num_of_nodes) << " %]" << std::endl;
report_counter = 0;
}
}
// Flush any nodes left in the cache that did not fill a complete chunk.
if (!node_cache.empty()) {
bh::CudaUtils::copyArrayToDevice(node_cache, &cuda_tree->d_nodes_[copied_node_counter]);
copied_node_counter += node_cache.size();
node_cache.clear();
}
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
return cuda_tree;
}
template <typename FloatT>
__device__ bool intersectsIterativeCuda(
const typename CudaTree<FloatT>::CudaRayType& ray,
const FloatT t_min,
const FloatT t_max,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack,
std::size_t stack_size,
const std::size_t max_stack_size,
typename CudaTree<FloatT>::CudaIntersectionResult* d_result) {
CudaRayData<FloatT> ray_data;
ray_data.origin = ray.origin;
ray_data.direction = ray.direction;
ray_data.inv_direction = ray.direction.cwiseInverse();
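// Depth-first traversal with an explicit stack: each popped node pushes at most its
// two children, and the caller sizes the per-ray stack as 2 * (tree depth + 1), so a
// well-formed tree should never overflow it; if it ever would, the loop stops and the
// function reports failure through its return value.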
while (stack_size > 0 && stack_size <= max_stack_size) {
printf("stack_size=%d\n", stack_size);
--stack_size;
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry &entry = stack[stack_size];
FloatT t_ray;
const bool intersects = entry.node->getBoundingBox().intersectsCuda(ray_data, &t_ray, t_min, t_max);
if (!intersects) {
continue;
}
FloatT intersection_dist_sq = t_ray * t_ray;
if (intersection_dist_sq > d_result->dist_sq) {
continue;
}
if (entry.node->isLeaf()) {
const bool inside_bounding_box = entry.node->getBoundingBox().isInside(ray_data.origin);
if (inside_bounding_box) {
// If already inside the bounding box we want the intersection point to be the start of the ray.
t_ray = 0;
intersection_dist_sq = 0;
}
if (intersection_dist_sq <= d_result->dist_sq) {
d_result->intersection = ray_data.origin + ray_data.direction * t_ray;
d_result->node = static_cast<void *>(entry.node->getPtr());
d_result->depth = entry.depth;
d_result->dist_sq = intersection_dist_sq;
}
}
else {
if (entry.node->hasLeftChild()) {
stack[stack_size].node = entry.node->getLeftChild();
stack[stack_size].depth = entry.depth + 1;
++stack_size;
}
if (entry.node->hasRightChild()) {
stack[stack_size].node = entry.node->getRightChild();
stack[stack_size].depth = entry.depth + 1;
++stack_size;
}
}
}
const bool success = stack_size == 0;
return success;
// if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited) {
// entry.intersects = false;
// const bool inside_bounding_box = entry.node->getBoundingBox().isInside(data.ray.origin);
// if (inside_bounding_box) {
//
// }
// const bool outside_bounding_box = entry.node->getBoundingBox().isOutside(data.ray.origin);
// CudaVector3<FloatT> intersection;
// FloatT intersection_dist_sq;
// bool early_break = false;
// if (outside_bounding_box) {
// // Check if ray intersects current node
// const bool intersects = entry.node->getBoundingBox().intersectsCuda(data.ray, &intersection);
// // std::cout << "intersects: " << intersects << std::endl;
// if (intersects) {
// intersection_dist_sq = (data.ray.origin - intersection).squaredNorm();
// if (intersection_dist_sq > d_result->dist_sq) {
// early_break = true;
// }
// }
// else {
// early_break = true;
// }
// }
// if (early_break) {
// --stack_size;
// }
// else {
// if (entry.node->isLeaf()) {
// if (!outside_bounding_box) {
// // If already inside the bounding box we want the intersection point to be the start of the ray.
// intersection = data.ray.origin;
// intersection_dist_sq = 0;
// }
// d_result->intersection = intersection;
// d_result->node = static_cast<void*>(entry.node->getPtr());
// d_result->depth = entry.depth;
// d_result->dist_sq = intersection_dist_sq;
// entry.intersects = true;
// --stack_size;
// }
// else {
// if (entry.node->hasLeftChild()) {
// stack[stack_size].node = entry.node->getLeftChild();
// stack[stack_size].depth = entry.depth + 1;
// stack[stack_size].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// ++stack_size;
// entry.state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedLeftChild;
// }
// }
// }
// }
// else if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedLeftChild) {
// // This stack entry was processed before. The next stack entries contain the child results
// entry.intersects = stack[stack_size + 0].intersects;
// if (entry.node->hasRightChild()) {
// stack[stack_size].node = entry.node->getRightChild();
// stack[stack_size].depth = entry.depth + 1;
// stack[stack_size].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// ++stack_size;
// entry.state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedRightChild;
// }
// }
// else if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedRightChild) {
// // This stack entry was processed before. The next stack entries contain the child results
// if (!entry.intersects) {
// entry.intersects = stack[stack_size].intersects;
// }
// --stack_size;
// }
// }
// return stack[0].intersects;
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
__device__ bool intersectsRecursiveCuda(
const typename CudaTree<FloatT>::CudaIntersectionData data,
typename CudaTree<FloatT>::NodeType* cur_node,
const std::size_t cur_depth,
typename CudaTree<FloatT>::CudaIntersectionResult* d_result) {
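// Recursive nearest-hit search: d_result->dist_sq holds the squared distance of the
// best intersection found so far (initialised from max_range by the caller), and any
// subtree whose bounding box is hit farther away than that is pruned.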
bool outside_bounding_box = cur_node->getBoundingBox().isOutside(data.ray.origin);
CudaVector3<FloatT> intersection;
FloatT intersection_dist_sq;
if (outside_bounding_box) {
// Check if ray intersects current node
const bool intersects = cur_node->getBoundingBox().intersectsCuda(data.ray, &intersection);
if (intersects) {
intersection_dist_sq = (data.ray.origin - intersection).squaredNorm();
if (intersection_dist_sq > d_result->dist_sq) {
return false;
}
}
else {
return false;
}
}
if (cur_node->isLeaf()) {
if (!outside_bounding_box) {
// If already inside the bounding box we want the intersection point to be the start of the ray (if the bbox extent > min_dist).
intersection = data.ray.origin;
intersection_dist_sq = 0;
}
d_result->intersection = intersection;
d_result->node = static_cast<void*>(cur_node->getPtr());
d_result->depth = cur_depth;
d_result->dist_sq = intersection_dist_sq;
return true;
}
bool intersects_left = false;
bool intersects_right = false;
if (cur_node->hasLeftChild()) {
intersects_left = intersectsRecursiveCuda<FloatT>(data, cur_node->getLeftChild(), cur_depth + 1, d_result);
}
if (cur_node->hasRightChild()) {
intersects_right = intersectsRecursiveCuda<FloatT>(data, cur_node->getRightChild(), cur_depth + 1, d_result);
}
return intersects_left || intersects_right;
}
#endif
#if WITH_CUDA_RECURSION
template <typename FloatT>
__global__ void intersectsRecursiveCudaKernel(
const typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_of_rays) {
const typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result);
}
}
#endif
template <typename FloatT>
__global__ void intersectsIterativeCudaKernel(
const typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_of_rays) {
const typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
template <typename FloatT>
__device__ CudaVector3<FloatT> getCameraRay(
const CudaMatrix4x4<FloatT>& intrinsics,
const FloatT x,
const FloatT y) {
CudaVector3<FloatT> ray_direction;
ray_direction(0) = (x - intrinsics(0, 2)) / intrinsics(0, 0);
ray_direction(1) = (y - intrinsics(1, 2)) / intrinsics(1, 1);
ray_direction(2) = 1;
return ray_direction;
}
template <typename FloatT>
__device__ CudaRay<FloatT> getCameraRay(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const FloatT x,
const FloatT y) {
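// The ray starts at the camera centre (fourth column of the extrinsics, which here
// appears to be a camera-to-world transform) and its direction is the pixel's
// camera-frame viewing ray rotated into the world frame.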
CudaRay<FloatT> ray;
ray.origin = extrinsics.col(3);
CudaVector3<FloatT> direction_camera = getCameraRay(intrinsics, x, y);
CudaMatrix3x3<FloatT> rotation = extrinsics.template block<0, 0, 3, 3>();
ray.direction = rotation * direction_camera;
return ray;
}
template <typename FloatT>
__global__ void raycastIterativeCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
// printf("Calling intersectsRecursiveCuda\n");
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
template <typename FloatT>
__global__ void raycastWithScreenCoordinatesIterativeCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates* d_results_with_screen_coordinates) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates& result = d_results_with_screen_coordinates[index];
result.intersection_result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.intersection_result.node = nullptr;
// printf("Calling intersectsRecursiveCuda\n");
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
result.screen_coordinates(0) = xf;
result.screen_coordinates(1) = yf;
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result.intersection_result);
printf("index=%d, success=%d\n", index, thread_success);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
__global__ void raycastRecursiveCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result);
}
}
template <typename FloatT>
__global__ void raycastWithScreenCoordinatesRecursiveCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates* d_results_with_screen_coordinates) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates& result = d_results_with_screen_coordinates[index];
result.intersection_result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.intersection_result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
result.screen_coordinates(0) = xf;
result.screen_coordinates(1) = yf;
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result.intersection_result);
}
}
#endif
#if WITH_CUDA_RECURSION
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::intersectsRecursive(const std::vector<CudaRayType>& rays, FloatT min_range /*= 0*/, FloatT max_range /*= -1*/) {
if (rays.empty()) {
return std::vector<CudaIntersectionResult>();
}
reserveDeviceRaysAndResults(rays.size());
bh::CudaUtils::copyArrayToDevice(rays, d_rays_);
const std::size_t grid_size = (rays.size() + kThreadsPerBlock - 1) / kThreadsPerBlock;
const std::size_t block_size = ::min(kThreadsPerBlock, rays.size());
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
hipLaunchKernelGGL(( intersectsRecursiveCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
d_rays_, rays.size(),
min_range, max_range,
getRoot(),
d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
std::vector<CudaIntersectionResult> cuda_results(rays.size());
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
#endif
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::intersectsIterative(const std::vector<CudaRayType>& rays, FloatT min_range /*= 0*/, FloatT max_range /*= -1*/) {
if (rays.empty()) {
return std::vector<CudaIntersectionResult>();
}
reserveDeviceRaysAndResults(rays.size());
bh::CudaUtils::copyArrayToDevice(rays, d_rays_);
bool* d_success = bh::CudaUtils::allocate<bool>();
const std::size_t grid_size = (rays.size() + kThreadsPerBlock - 1) / kThreadsPerBlock;
const std::size_t block_size = ::min(kThreadsPerBlock, rays.size());
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
hipLaunchKernelGGL(( intersectsIterativeCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
d_rays_, rays.size(),
min_range, max_range,
getRoot(),
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResult> cuda_results(rays.size());
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::raycastRecursive(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
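// One block per image row and one thread per pixel in that row; this assumes the row
// width (x_end - x_start) does not exceed the device's maximum threads per block
// (typically 1024).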
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
NodeType* root = getRoot();
hipLaunchKernelGGL(( raycastRecursiveCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
intrinsics, extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_results_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
hipError_t err = hipDeviceSynchronize();
if (hipSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, hipGetErrorString(err));
throw bh::CudaError(err);
}
}
std::vector<CudaIntersectionResult> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates>
CudaTree<FloatT>::raycastWithScreenCoordinatesRecursive(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = bh::CudaUtils::computeGridSize(num_of_rays, kThreadsPerBlock);
const std::size_t block_size = bh::CudaUtils::computeBlockSize(num_of_rays, kThreadsPerBlock);
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
NodeType* root = getRoot();
hipLaunchKernelGGL(( raycastWithScreenCoordinatesRecursiveCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
intrinsics, extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_results_with_screen_coordinates_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
hipError_t err = hipDeviceSynchronize();
if (hipSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, hipGetErrorString(err));
fprintf(stderr, "grid_size=%d, block_size=%d\n", grid_size, block_size);
for (size_t row = 0; row < intrinsics.Rows; ++row) {
fprintf(stderr, "intrinsics(%d)=", row);
for (size_t col = 0; col < intrinsics.Cols; ++col) {
if (col > 0) {
fprintf(stderr, ", ");
}
fprintf(stderr, "%f", intrinsics(row, col));
}
fprintf(stderr, "\n");
}
for (size_t row = 0; row < extrinsics.Rows; ++row) {
fprintf(stderr, "extrinsics(%d)=", row);
for (size_t col = 0; col < extrinsics.Cols; ++col) {
if (col > 0) {
fprintf(stderr, ", ");
}
fprintf(stderr, "%f", extrinsics(row, col));
}
fprintf(stderr, "\n");
}
fprintf(stderr, "x_start=%l, x_end=%l, y_start=%l, y_end=%l\n", x_start, x_end, y_start, y_end);
fprintf(stderr, "min_range=%f, fail_on_error=%d\n", min_range, fail_on_error);
throw bh::CudaError(err);
}
}
std::vector<CudaIntersectionResultWithScreenCoordinates> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_with_screen_coordinates_, &cuda_results);
return cuda_results;
}
#endif
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::raycastIterative(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
bool* d_success = bh::CudaUtils::allocate<bool>();
NodeType* root = getRoot();
hipLaunchKernelGGL(( raycastIterativeCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
intrinsics,
extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
hipError_t err = hipDeviceSynchronize();
if (hipSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, hipGetErrorString(err));
throw bh::CudaError(err);
}
}
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResult> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates>
CudaTree<FloatT>::raycastWithScreenCoordinatesIterative(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
bool* d_success = bh::CudaUtils::allocate<bool>();
NodeType* root = getRoot();
hipLaunchKernelGGL(( raycastWithScreenCoordinatesIterativeCudaKernel<FloatT>), dim3(grid_size), dim3(block_size), 0, 0,
intrinsics,
extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_with_screen_coordinates_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
hipError_t err = hipDeviceSynchronize();
if (hipSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, hipGetErrorString(err));
throw bh::CudaError(err);
}
}
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResultWithScreenCoordinates> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_with_screen_coordinates_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
void CudaTree<FloatT>::reserveDeviceRaysAndResults(const std::size_t num_of_rays) {
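// Device buffers are grown lazily and only reallocated when a larger request arrives,
// so repeated raycasts at the same resolution reuse the existing allocations.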
if (num_of_rays > d_rays_size_) {
if (d_rays_ != nullptr) {
bh::CudaUtils::deallocate(&d_rays_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
d_rays_ = bh::CudaUtils::template allocate<CudaRayType>(num_of_rays);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_rays_size_ = num_of_rays;
}
if (num_of_rays > d_results_size_) {
if (d_results_ != nullptr) {
bh::CudaUtils::deallocate(&d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
d_results_ = bh::CudaUtils::template allocate<CudaIntersectionResult>(num_of_rays);
d_results_with_screen_coordinates_ = bh::CudaUtils::template allocate<CudaIntersectionResultWithScreenCoordinates>(num_of_rays);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_results_size_ = num_of_rays;
}
if (num_of_rays > d_stacks_size_) {
if (d_stacks_ != nullptr) {
bh::CudaUtils::deallocate(&d_stacks_);
}
d_stacks_ = bh::CudaUtils::template allocate<CudaIntersectionIterativeStackEntry>(num_of_rays * 2 * (tree_depth_ + 1));
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_stacks_size_ = num_of_rays;
}
}
template
class CudaTree<float>;
}
| 671d6e6abbf401437321ccb7dde20a3d94638539.cu | //==================================================
// bvh.cu
//
// Copyright (c) 2016 Benjamin Hepp.
// Author: Benjamin Hepp
// Created on: Jan 16, 2017
//==================================================
#include <bh/cuda_utils.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <iostream>
#include <bh/utilities.h>
#include "bvh.cuh"
#include <bh/cuda_utils.h>
#include <stdlib.h>
#include <assert.h>
using std::printf;
namespace bvh {
template <typename FloatT>
CudaTree<FloatT>* CudaTree<FloatT>::createCopyFromHostTree(
NodeType* root,
const std::size_t num_of_nodes,
const std::size_t tree_depth) {
CudaTree* cuda_tree = new CudaTree(tree_depth);
const std::size_t memory_size = sizeof(NodeType) * num_of_nodes;
std::cout << "Allocating " << (memory_size / 1024. / 1024.) << " MB of GPU memory" << std::endl;
cuda_tree->d_nodes_ = bh::CudaUtils::template allocate<NodeType>(num_of_nodes);
std::deque<NodeType*> node_queue;
node_queue.push_front(root);
std::size_t node_counter = 0;
std::size_t copied_node_counter = 0;
const std::size_t report_threshold = num_of_nodes / 20;
std::size_t report_counter = 0;
const std::size_t node_cache_size = num_of_nodes / 20;
std::vector<CudaNode<FloatT>> node_cache;
node_cache.reserve(node_cache_size);
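// The host tree is flattened in breadth-first order: a child's slot in d_nodes_ can be
// computed from its position in the queue before the child itself is visited, and
// nodes are staged in node_cache so they are copied to the device in large chunks
// (about one twentieth of the tree at a time) rather than one transfer per node.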
while (!node_queue.empty()) {
NodeType* node = node_queue.back();
node_queue.pop_back();
CudaNode<FloatT> cuda_node;
cuda_node.bounding_box_ = node->bounding_box_;
cuda_node.ptr_ = static_cast<void*>(node);
BH_ASSERT(cuda_node.ptr_ != nullptr);
if (node->hasLeftChild()) {
node_queue.push_front(node->getLeftChild());
const std::size_t left_child_index = node_counter + node_queue.size();
cuda_node.left_child_ = &cuda_tree->d_nodes_[left_child_index];
}
else {
cuda_node.left_child_ = nullptr;
}
if (node->hasRightChild()) {
node_queue.push_front(node->getRightChild());
std::size_t right_child_index = node_counter + node_queue.size();
cuda_node.right_child_ = &cuda_tree->d_nodes_[right_child_index];
}
else {
cuda_node.right_child_ = nullptr;
}
// bh::CudaUtils::copyToDevice(cuda_node, &tree->d_nodes_[node_counter]);
node_cache.push_back(cuda_node);
if (node_cache.size() == node_cache_size) {
bh::CudaUtils::copyArrayToDevice(node_cache, &cuda_tree->d_nodes_[copied_node_counter]);
copied_node_counter += node_cache.size();
node_cache.clear();
}
++node_counter;
++report_counter;
if (report_counter >= report_threshold) {
std::cout << "Copied " << node_counter << " nodes [" << (100 * node_counter / (FloatT)num_of_nodes) << " %]" << std::endl;
report_counter = 0;
}
}
// Flush any nodes left in the cache that did not fill a complete chunk.
if (!node_cache.empty()) {
bh::CudaUtils::copyArrayToDevice(node_cache, &cuda_tree->d_nodes_[copied_node_counter]);
copied_node_counter += node_cache.size();
node_cache.clear();
}
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
return cuda_tree;
}
template <typename FloatT>
__device__ bool intersectsIterativeCuda(
const typename CudaTree<FloatT>::CudaRayType& ray,
const FloatT t_min,
const FloatT t_max,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack,
std::size_t stack_size,
const std::size_t max_stack_size,
typename CudaTree<FloatT>::CudaIntersectionResult* d_result) {
CudaRayData<FloatT> ray_data;
ray_data.origin = ray.origin;
ray_data.direction = ray.direction;
ray_data.inv_direction = ray.direction.cwiseInverse();
while (stack_size > 0 && stack_size <= max_stack_size) {
printf("stack_size=%d\n", stack_size);
--stack_size;
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry &entry = stack[stack_size];
FloatT t_ray;
const bool intersects = entry.node->getBoundingBox().intersectsCuda(ray_data, &t_ray, t_min, t_max);
if (!intersects) {
continue;
}
FloatT intersection_dist_sq = t_ray * t_ray;
if (intersection_dist_sq > d_result->dist_sq) {
continue;
}
if (entry.node->isLeaf()) {
const bool inside_bounding_box = entry.node->getBoundingBox().isInside(ray_data.origin);
if (inside_bounding_box) {
// If already inside the bounding box we want the intersection point to be the start of the ray.
t_ray = 0;
intersection_dist_sq = 0;
}
if (intersection_dist_sq <= d_result->dist_sq) {
d_result->intersection = ray_data.origin + ray_data.direction * t_ray;
d_result->node = static_cast<void *>(entry.node->getPtr());
d_result->depth = entry.depth;
d_result->dist_sq = intersection_dist_sq;
}
}
else {
if (entry.node->hasLeftChild()) {
stack[stack_size].node = entry.node->getLeftChild();
stack[stack_size].depth = entry.depth + 1;
++stack_size;
}
if (entry.node->hasRightChild()) {
stack[stack_size].node = entry.node->getRightChild();
stack[stack_size].depth = entry.depth + 1;
++stack_size;
}
}
}
const bool success = stack_size == 0;
return success;
// if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited) {
// entry.intersects = false;
// const bool inside_bounding_box = entry.node->getBoundingBox().isInside(data.ray.origin);
// if (inside_bounding_box) {
//
// }
// const bool outside_bounding_box = entry.node->getBoundingBox().isOutside(data.ray.origin);
// CudaVector3<FloatT> intersection;
// FloatT intersection_dist_sq;
// bool early_break = false;
// if (outside_bounding_box) {
// // Check if ray intersects current node
// const bool intersects = entry.node->getBoundingBox().intersectsCuda(data.ray, &intersection);
// // std::cout << "intersects: " << intersects << std::endl;
// if (intersects) {
// intersection_dist_sq = (data.ray.origin - intersection).squaredNorm();
// if (intersection_dist_sq > d_result->dist_sq) {
// early_break = true;
// }
// }
// else {
// early_break = true;
// }
// }
// if (early_break) {
// --stack_size;
// }
// else {
// if (entry.node->isLeaf()) {
// if (!outside_bounding_box) {
// // If already inside the bounding box we want the intersection point to be the start of the ray.
// intersection = data.ray.origin;
// intersection_dist_sq = 0;
// }
// d_result->intersection = intersection;
// d_result->node = static_cast<void*>(entry.node->getPtr());
// d_result->depth = entry.depth;
// d_result->dist_sq = intersection_dist_sq;
// entry.intersects = true;
// --stack_size;
// }
// else {
// if (entry.node->hasLeftChild()) {
// stack[stack_size].node = entry.node->getLeftChild();
// stack[stack_size].depth = entry.depth + 1;
// stack[stack_size].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// ++stack_size;
// entry.state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedLeftChild;
// }
// }
// }
// }
// else if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedLeftChild) {
// // This stack entry was processed before. The next stack entries contain the child results
// entry.intersects = stack[stack_size + 0].intersects;
// if (entry.node->hasRightChild()) {
// stack[stack_size].node = entry.node->getRightChild();
// stack[stack_size].depth = entry.depth + 1;
// stack[stack_size].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// ++stack_size;
// entry.state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedRightChild;
// }
// }
// else if (entry.state == CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::PushedRightChild) {
// // This stack entry was processed before. The next stack entries contain the child results
// if (!entry.intersects) {
// entry.intersects = stack[stack_size].intersects;
// }
// --stack_size;
// }
// }
// return stack[0].intersects;
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
__device__ bool intersectsRecursiveCuda(
const typename CudaTree<FloatT>::CudaIntersectionData data,
typename CudaTree<FloatT>::NodeType* cur_node,
const std::size_t cur_depth,
typename CudaTree<FloatT>::CudaIntersectionResult* d_result) {
bool outside_bounding_box = cur_node->getBoundingBox().isOutside(data.ray.origin);
CudaVector3<FloatT> intersection;
FloatT intersection_dist_sq;
if (outside_bounding_box) {
// Check if ray intersects current node
const bool intersects = cur_node->getBoundingBox().intersectsCuda(data.ray, &intersection);
if (intersects) {
intersection_dist_sq = (data.ray.origin - intersection).squaredNorm();
if (intersection_dist_sq > d_result->dist_sq) {
return false;
}
}
else {
return false;
}
}
if (cur_node->isLeaf()) {
if (!outside_bounding_box) {
// If already inside the bounding box we want the intersection point to be the start of the ray (if the bbox extent > min_dist).
intersection = data.ray.origin;
intersection_dist_sq = 0;
}
d_result->intersection = intersection;
d_result->node = static_cast<void*>(cur_node->getPtr());
d_result->depth = cur_depth;
d_result->dist_sq = intersection_dist_sq;
return true;
}
bool intersects_left = false;
bool intersects_right = false;
if (cur_node->hasLeftChild()) {
intersects_left = intersectsRecursiveCuda<FloatT>(data, cur_node->getLeftChild(), cur_depth + 1, d_result);
}
if (cur_node->hasRightChild()) {
intersects_right = intersectsRecursiveCuda<FloatT>(data, cur_node->getRightChild(), cur_depth + 1, d_result);
}
return intersects_left || intersects_right;
}
#endif
#if WITH_CUDA_RECURSION
template <typename FloatT>
__global__ void intersectsRecursiveCudaKernel(
const typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_of_rays) {
const typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result);
}
}
#endif
template <typename FloatT>
__global__ void intersectsIterativeCudaKernel(
const typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_of_rays) {
const typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
template <typename FloatT>
__device__ CudaVector3<FloatT> getCameraRay(
const CudaMatrix4x4<FloatT>& intrinsics,
const FloatT x,
const FloatT y) {
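// Pinhole back-projection: the pixel (x, y) is mapped to the camera-frame viewing
// direction ((x - cx) / fx, (y - cy) / fy, 1), i.e. K^-1 * (x, y, 1) up to scale,
// where fx, fy, cx, cy are the usual entries of the intrinsics matrix.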
CudaVector3<FloatT> ray_direction;
ray_direction(0) = (x - intrinsics(0, 2)) / intrinsics(0, 0);
ray_direction(1) = (y - intrinsics(1, 2)) / intrinsics(1, 1);
ray_direction(2) = 1;
return ray_direction;
}
template <typename FloatT>
__device__ CudaRay<FloatT> getCameraRay(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const FloatT x,
const FloatT y) {
CudaRay<FloatT> ray;
ray.origin = extrinsics.col(3);
CudaVector3<FloatT> direction_camera = getCameraRay(intrinsics, x, y);
CudaMatrix3x3<FloatT> rotation = extrinsics.template block<0, 0, 3, 3>();
ray.direction = rotation * direction_camera;
return ray;
}
template <typename FloatT>
__global__ void raycastIterativeCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
// printf("Calling intersectsRecursiveCuda\n");
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
template <typename FloatT>
__global__ void raycastWithScreenCoordinatesIterativeCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stacks,
const std::size_t max_stack_size,
bool* d_success,
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates* d_results_with_screen_coordinates) {
*d_success = true;
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates& result = d_results_with_screen_coordinates[index];
result.intersection_result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.intersection_result.node = nullptr;
// printf("Calling intersectsRecursiveCuda\n");
typename CudaTree<FloatT>::CudaIntersectionIterativeStackEntry* stack = &stacks[index * max_stack_size];
stack[0].depth = 0;
stack[0].node = d_root;
const std::size_t stack_size = 1;
// for (std::size_t i = 0; i < max_stack_size; ++i) {
// stack[i].state = CudaTree<FloatT>::CudaIntersectionIterativeStackEntry::NotVisited;
// }
result.screen_coordinates(0) = xf;
result.screen_coordinates(1) = yf;
const bool thread_success = intersectsIterativeCuda<FloatT>(ray, min_range, max_range, stack, stack_size, max_stack_size, &result.intersection_result);
printf("index=%d, success=%d\n", index, thread_success);
if (!thread_success) {
*d_success = thread_success;
}
assert(thread_success);
}
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
__global__ void raycastRecursiveCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResult* d_results) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResult& result = d_results[index];
result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result);
}
}
template <typename FloatT>
__global__ void raycastWithScreenCoordinatesRecursiveCudaKernel(
const CudaMatrix4x4<FloatT> intrinsics,
const CudaMatrix3x4<FloatT> extrinsics,
typename CudaTree<FloatT>::CudaRayType* d_rays,
const std::size_t num_of_rays,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
const FloatT min_range,
const FloatT max_range,
typename CudaTree<FloatT>::NodeType* d_root,
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates* d_results_with_screen_coordinates) {
const std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const std::size_t yi = index / (x_end - x_start);
const std::size_t xi = index % (x_end - x_start);
// const std::size_t yi = blockIdx.x;
// const std::size_t xi = threadIdx.x;
const FloatT yf = y_start + yi;
const FloatT xf = x_start + xi;
if (xi < (x_end - x_start) && yi < (y_end - y_start)) {
typename CudaTree<FloatT>::CudaRayType& ray = d_rays[index];
ray = getCameraRay(intrinsics, extrinsics, xf, yf);
typename CudaTree<FloatT>::CudaIntersectionData data;
data.ray.origin = ray.origin;
data.ray.direction = ray.direction;
data.ray.inv_direction = ray.direction.cwiseInverse();
data.min_range_sq = min_range * min_range;
typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates& result = d_results_with_screen_coordinates[index];
result.intersection_result.dist_sq = max_range > 0 ? max_range * max_range : FLT_MAX;
result.intersection_result.node = nullptr;
const std::size_t cur_depth = 0;
// printf("Calling intersectsRecursiveCuda\n");
result.screen_coordinates(0) = xf;
result.screen_coordinates(1) = yf;
intersectsRecursiveCuda<FloatT>(data, d_root, cur_depth, &result.intersection_result);
}
}
#endif
#if WITH_CUDA_RECURSION
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::intersectsRecursive(const std::vector<CudaRayType>& rays, FloatT min_range /*= 0*/, FloatT max_range /*= -1*/) {
if (rays.empty()) {
return std::vector<CudaIntersectionResult>();
}
reserveDeviceRaysAndResults(rays.size());
bh::CudaUtils::copyArrayToDevice(rays, d_rays_);
const std::size_t grid_size = (rays.size() + kThreadsPerBlock - 1) / kThreadsPerBlock;
const std::size_t block_size = std::min(kThreadsPerBlock, rays.size());
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
intersectsRecursiveCudaKernel<FloatT><<<grid_size, block_size>>>(
d_rays_, rays.size(),
min_range, max_range,
getRoot(),
d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
std::vector<CudaIntersectionResult> cuda_results(rays.size());
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
#endif
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::intersectsIterative(const std::vector<CudaRayType>& rays, FloatT min_range /*= 0*/, FloatT max_range /*= -1*/) {
if (rays.empty()) {
return std::vector<CudaIntersectionResult>();
}
reserveDeviceRaysAndResults(rays.size());
bh::CudaUtils::copyArrayToDevice(rays, d_rays_);
bool* d_success = bh::CudaUtils::allocate<bool>();
const std::size_t grid_size = (rays.size() + kThreadsPerBlock - 1) / kThreadsPerBlock;
const std::size_t block_size = std::min(kThreadsPerBlock, rays.size());
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
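  // The per-ray stack passed below holds 2 * (tree_depth_ + 1) entries (matching the
  // d_stacks_ allocation), presumably allowing up to two pending entries per tree level
  // during the iterative traversal.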
intersectsIterativeCudaKernel<FloatT><<<grid_size, block_size>>>(
d_rays_, rays.size(),
min_range, max_range,
getRoot(),
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResult> cuda_results(rays.size());
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
#if WITH_CUDA_RECURSION
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::raycastRecursive(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
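  // One block per image row and one thread per column; this mapping assumes the requested
  // width (x_end - x_start) does not exceed the device's maximum threads per block.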
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
NodeType* root = getRoot();
raycastRecursiveCudaKernel<FloatT><<<grid_size, block_size>>>(
intrinsics, extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_results_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
cudaError err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, cudaGetErrorString(err));
throw bh::CudaError(err);
}
}
std::vector<CudaIntersectionResult> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates>
CudaTree<FloatT>::raycastWithScreenCoordinatesRecursive(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = bh::CudaUtils::computeGridSize(num_of_rays, kThreadsPerBlock);
const std::size_t block_size = bh::CudaUtils::computeBlockSize(num_of_rays, kThreadsPerBlock);
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
NodeType* root = getRoot();
raycastWithScreenCoordinatesRecursiveCudaKernel<FloatT><<<grid_size, block_size>>>(
intrinsics, extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_results_with_screen_coordinates_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
cudaError err = cudaDeviceSynchronize();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, cudaGetErrorString(err));
fprintf(stderr, "grid_size=%d, block_size=%d\n", grid_size, block_size);
for (size_t row = 0; row < intrinsics.Rows; ++row) {
fprintf(stderr, "intrinsics(%d)=", row);
for (size_t col = 0; col < intrinsics.Cols; ++col) {
if (col > 0) {
fprintf(stderr, ", ");
}
fprintf(stderr, "%f", intrinsics(row, col));
}
fprintf(stderr, "\n");
}
for (size_t row = 0; row < extrinsics.Rows; ++row) {
fprintf(stderr, "extrinsics(%d)=", row);
for (size_t col = 0; col < extrinsics.Cols; ++col) {
if (col > 0) {
fprintf(stderr, ", ");
}
fprintf(stderr, "%f", extrinsics(row, col));
}
fprintf(stderr, "\n");
}
fprintf(stderr, "x_start=%l, x_end=%l, y_start=%l, y_end=%l\n", x_start, x_end, y_start, y_end);
fprintf(stderr, "min_range=%f, fail_on_error=%d\n", min_range, fail_on_error);
throw bh::CudaError(err);
}
}
std::vector<CudaIntersectionResultWithScreenCoordinates> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_with_screen_coordinates_, &cuda_results);
return cuda_results;
}
#endif
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResult>
CudaTree<FloatT>::raycastIterative(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
bool* d_success = bh::CudaUtils::allocate<bool>();
NodeType* root = getRoot();
raycastIterativeCudaKernel<FloatT><<<grid_size, block_size>>>(
intrinsics,
extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
cudaError err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, cudaGetErrorString(err));
throw bh::CudaError(err);
}
}
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResult> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
std::vector<typename CudaTree<FloatT>::CudaIntersectionResultWithScreenCoordinates>
CudaTree<FloatT>::raycastWithScreenCoordinatesIterative(
const CudaMatrix4x4<FloatT>& intrinsics,
const CudaMatrix3x4<FloatT>& extrinsics,
const std::size_t x_start, const std::size_t x_end,
const std::size_t y_start, const std::size_t y_end,
FloatT min_range /*= 0*/, FloatT max_range /*= -1*/,
const bool fail_on_error /*= false*/) {
const std::size_t num_of_rays = (y_end - y_start) * (x_end - x_start);
reserveDeviceRaysAndResults(num_of_rays);
const std::size_t grid_size = y_end - y_start;
const std::size_t block_size = x_end - x_start;
BH_ASSERT(grid_size > 0);
BH_ASSERT(block_size > 0);
BH_ASSERT(getRoot() != nullptr);
// std::cout << "grid_size=" << grid_size << ", block_size=" << block_size << std::endl;
bool* d_success = bh::CudaUtils::allocate<bool>();
NodeType* root = getRoot();
raycastWithScreenCoordinatesIterativeCudaKernel<FloatT><<<grid_size, block_size>>>(
intrinsics,
extrinsics,
d_rays_, num_of_rays,
x_start, x_end,
y_start, y_end,
min_range, max_range,
root,
d_stacks_,
2 * (tree_depth_ + 1),
d_success,
d_results_with_screen_coordinates_);
if (fail_on_error) {
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
else {
cudaError err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
fprintf(stderr, "CUDA error in file '%s' in line %i: %s\n",
__FILE__, __LINE__, cudaGetErrorString(err));
throw bh::CudaError(err);
}
}
const bool success = bh::CudaUtils::copyFromDevice(d_success);
bh::CudaUtils::deallocate(&d_success);
if (!success) {
throw bh::Error("Iterative raycast failed. Maximum stack size was exceeded.");
}
std::vector<CudaIntersectionResultWithScreenCoordinates> cuda_results(num_of_rays);
bh::CudaUtils::copyArrayFromDevice(d_results_with_screen_coordinates_, &cuda_results);
return cuda_results;
}
template <typename FloatT>
void CudaTree<FloatT>::reserveDeviceRaysAndResults(const std::size_t num_of_rays) {
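  // Grow-only reallocation: device buffers are reallocated only when a larger request
  // arrives, so repeated raycasts of the same or smaller size reuse the existing memory.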
if (num_of_rays > d_rays_size_) {
if (d_rays_ != nullptr) {
bh::CudaUtils::deallocate(&d_rays_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
d_rays_ = bh::CudaUtils::template allocate<CudaRayType>(num_of_rays);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_rays_size_ = num_of_rays;
}
if (num_of_rays > d_results_size_) {
if (d_results_ != nullptr) {
bh::CudaUtils::deallocate(&d_results_);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
}
d_results_ = bh::CudaUtils::template allocate<CudaIntersectionResult>(num_of_rays);
d_results_with_screen_coordinates_ = bh::CudaUtils::template allocate<CudaIntersectionResultWithScreenCoordinates>(num_of_rays);
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_results_size_ = num_of_rays;
}
if (num_of_rays > d_stacks_size_) {
if (d_stacks_ != nullptr) {
bh::CudaUtils::deallocate(&d_stacks_);
}
d_stacks_ = bh::CudaUtils::template allocate<CudaIntersectionIterativeStackEntry>(num_of_rays * 2 * (tree_depth_ + 1));
CUDA_DEVICE_SYNCHRONIZE();
CUDA_CHECK_ERROR();
d_stacks_size_ = num_of_rays;
}
}
template
class CudaTree<float>;
}
|
c631e4ad48efb5981374796fce3dfed4289d98fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "deltaCV/gpu/cudaImg.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <opencv2/opencv.hpp>
// Erosion
__global__ void erode(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
short int erodeElementRows,
short int erodeElementCols)
{
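    // Morphological erosion: each interior output pixel becomes the minimum value inside a
    // (2*erodeElementRows+1) x (2*erodeElementCols+1) window centred on it; pixels near the
    // image border are simply copied from the input.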
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
int tid = xdx + ydx * imgCols;
    unsigned char val = dataIn[tid];
dataOut[tid] = dataIn[tid];
if(xdx > erodeElementCols-1 && xdx < imgCols-erodeElementCols
&& ydx>erodeElementRows && ydx < imgRows-erodeElementRows)
{
        for (int i = -erodeElementRows; i < erodeElementRows+1; ++i) { // rows
            for (int j = -erodeElementCols; j < erodeElementCols+1; ++j) { // columns
                unsigned char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)];
                if(temp_val < val)
                {
                    val = temp_val;  // track the running minimum of the window
                    dataOut[tid] = val;
                }
}
}
}
}
// Dilation
__global__ void dilate(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
short int dilateElementRows,
short int dilateElementCols)
{
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
int tid = xdx + ydx * imgCols;
    unsigned char val = dataIn[tid];
dataOut[tid] = dataIn[tid];
if(xdx > dilateElementCols-1 && xdx < imgCols-dilateElementCols
&& ydx>dilateElementRows && ydx < imgRows-dilateElementRows)
{
        for (int i = -dilateElementRows; i < dilateElementRows+1; ++i) { // rows
            for (int j = -dilateElementCols; j < dilateElementCols+1; ++j) { // columns
                unsigned char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)];
                if(temp_val > val)
                {
                    val = temp_val;  // track the running maximum of the window
                    dataOut[tid] = val;
                }
}
}
}
}
void erode_gpu( unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
cv::Size erodeSize,
dim3 tPerBlock,
dim3 bPerGrid)
{
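    // erodeSize is the full structuring-element size (e.g. cv::Size(3, 3)); the kernel
    // expects half-extents, hence the (size - 1) / 2 conversion below.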
hipLaunchKernelGGL(( erode), dim3(bPerGrid),dim3(tPerBlock), 0, 0, dataIn,dataOut,imgRows,imgCols,(erodeSize.height-1)/2,(erodeSize.width-1)/2);
}
void dilate_gpu(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
cv::Size dilateSize,
dim3 tPerBlock,
dim3 bPerGrid)
{
hipLaunchKernelGGL(( dilate), dim3(bPerGrid),dim3(tPerBlock), 0, 0, dataIn,dataOut,imgRows,imgCols,(dilateSize.height-1)/2,(dilateSize.width-1)/2);
}
| c631e4ad48efb5981374796fce3dfed4289d98fe.cu | #include "deltaCV/gpu/cudaImg.cuh"
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <opencv2/opencv.hpp>
// Erosion
__global__ void erode(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
short int erodeElementRows,
short int erodeElementCols)
{
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
int tid = xdx + ydx * imgCols;
    unsigned char val = dataIn[tid];
dataOut[tid] = dataIn[tid];
if(xdx > erodeElementCols-1 && xdx < imgCols-erodeElementCols
&& ydx>erodeElementRows && ydx < imgRows-erodeElementRows)
{
        for (int i = -erodeElementRows; i < erodeElementRows+1; ++i) { // rows
            for (int j = -erodeElementCols; j < erodeElementCols+1; ++j) { // columns
                unsigned char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)];
                if(temp_val < val)
                {
                    val = temp_val;  // track the running minimum of the window
                    dataOut[tid] = val;
                }
}
}
}
}
// Dilation
__global__ void dilate(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
short int dilateElementRows,
short int dilateElementCols)
{
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
int tid = xdx + ydx * imgCols;
    unsigned char val = dataIn[tid];
dataOut[tid] = dataIn[tid];
if(xdx > dilateElementCols-1 && xdx < imgCols-dilateElementCols
&& ydx>dilateElementRows && ydx < imgRows-dilateElementRows)
{
        for (int i = -dilateElementRows; i < dilateElementRows+1; ++i) { // rows
            for (int j = -dilateElementCols; j < dilateElementCols+1; ++j) { // columns
                unsigned char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)];
                if(temp_val > val)
                {
                    val = temp_val;  // track the running maximum of the window
                    dataOut[tid] = val;
                }
}
}
}
}
void erode_gpu( unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
cv::Size erodeSize,
dim3 tPerBlock,
dim3 bPerGrid)
{
erode<<<bPerGrid,tPerBlock>>>(dataIn,dataOut,imgRows,imgCols,(erodeSize.height-1)/2,(erodeSize.width-1)/2);
}
void dilate_gpu(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
cv::Size dilateSize,
dim3 tPerBlock,
dim3 bPerGrid)
{
dilate<<<bPerGrid,tPerBlock>>>(dataIn,dataOut,imgRows,imgCols,(dilateSize.height-1)/2,(dilateSize.width-1)/2);
}
|
be18fc25f2694cafbde8da480b7dec3e16686ef2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < len) out[tx] = in1[tx] + in2[tx];
return;
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * d11, *d21;
float * d12, *d22;
float * dout1, *dout2;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
const int segsize = 4096;
hipMalloc((void **)&d11, segsize * sizeof(float));
hipMalloc((void **)&d12, segsize * sizeof(float));
hipMalloc((void **)&dout1, segsize * sizeof(float));
hipMalloc((void **)&d21, segsize * sizeof(float));
hipMalloc((void **)&d22, segsize * sizeof(float));
hipMalloc((void **)&dout2, segsize * sizeof(float));
hipStream_t s1, s2, s3, s4;
hipStreamCreate(&s1);
hipStreamCreate(&s2);
hipStreamCreate(&s3);
hipStreamCreate(&s4);
    // two streams (s1 and s2) process alternating segments so copies and kernels can overlap
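    // NOTE: hostInput1/hostInput2/hostOutput come from plain malloc (pageable memory), so
    // these hipMemcpyAsync calls cannot fully overlap with kernel execution; pinned host
    // allocations (e.g. hipHostMalloc) would be needed for true copy/compute overlap.
    // Streams s3 and s4 are created above but never used.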
int number[2];
for (int i = 0; i < inputLength; i += 2 * segsize) {
// handle boundary conditions in case (inputLength % segsize != 0)
for (int j = 0; j < 2; ++j) {
if (i + j * segsize + segsize <= inputLength) number[j] = segsize;
else if (i + j * segsize < inputLength) number[j] = inputLength - i - j * segsize;
else number[j] = 0;
}
hipMemcpyAsync(d11, hostInput1 + i, number[0] * sizeof(float), hipMemcpyHostToDevice, s1);
hipMemcpyAsync(d12, hostInput2 + i, number[0] * sizeof(float), hipMemcpyHostToDevice, s1);
hipMemcpyAsync(d21, hostInput1 + i + segsize * 1, number[1] * sizeof(float), hipMemcpyHostToDevice, s2);
hipMemcpyAsync(d22, hostInput2 + i + segsize * 1, number[1] * sizeof(float), hipMemcpyHostToDevice, s2);
hipLaunchKernelGGL(( vecAdd), dim3((number[0] - 1) / 256 + 1), dim3(256), 0, s1, d11, d12, dout1, number[0]);
hipMemcpyAsync(hostOutput + i, dout1, number[0] * sizeof(float), hipMemcpyDeviceToHost, s1);
hipLaunchKernelGGL(( vecAdd), dim3((number[1] - 1) / 256 + 1), dim3(256), 0, s2, d21, d22, dout2, number[1]);
hipMemcpyAsync(hostOutput + i + segsize * 1, dout2, number[1] * sizeof(float), hipMemcpyDeviceToHost, s2);
}
hipFree(d11); hipFree(d21);
hipFree(d12); hipFree(d22);
hipFree(dout1); hipFree(dout2);
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| be18fc25f2694cafbde8da480b7dec3e16686ef2.cu | #include <wb.h>
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < len) out[tx] = in1[tx] + in2[tx];
return;
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * d11, *d21;
float * d12, *d22;
float * dout1, *dout2;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
const int segsize = 4096;
cudaMalloc((void **)&d11, segsize * sizeof(float));
cudaMalloc((void **)&d12, segsize * sizeof(float));
cudaMalloc((void **)&dout1, segsize * sizeof(float));
cudaMalloc((void **)&d21, segsize * sizeof(float));
cudaMalloc((void **)&d22, segsize * sizeof(float));
cudaMalloc((void **)&dout2, segsize * sizeof(float));
cudaStream_t s1, s2, s3, s4;
cudaStreamCreate(&s1);
cudaStreamCreate(&s2);
cudaStreamCreate(&s3);
cudaStreamCreate(&s4);
    // two streams (s1 and s2) process alternating segments so copies and kernels can overlap
int number[2];
for (int i = 0; i < inputLength; i += 2 * segsize) {
// handle boundary conditions in case (inputLength % segsize != 0)
for (int j = 0; j < 2; ++j) {
if (i + j * segsize + segsize <= inputLength) number[j] = segsize;
else if (i + j * segsize < inputLength) number[j] = inputLength - i - j * segsize;
else number[j] = 0;
}
cudaMemcpyAsync(d11, hostInput1 + i, number[0] * sizeof(float), cudaMemcpyHostToDevice, s1);
cudaMemcpyAsync(d12, hostInput2 + i, number[0] * sizeof(float), cudaMemcpyHostToDevice, s1);
cudaMemcpyAsync(d21, hostInput1 + i + segsize * 1, number[1] * sizeof(float), cudaMemcpyHostToDevice, s2);
cudaMemcpyAsync(d22, hostInput2 + i + segsize * 1, number[1] * sizeof(float), cudaMemcpyHostToDevice, s2);
vecAdd<<<(number[0] - 1) / 256 + 1, 256, 0, s1>>>(d11, d12, dout1, number[0]);
cudaMemcpyAsync(hostOutput + i, dout1, number[0] * sizeof(float), cudaMemcpyDeviceToHost, s1);
vecAdd<<<(number[1] - 1) / 256 + 1, 256, 0, s2>>>(d21, d22, dout2, number[1]);
cudaMemcpyAsync(hostOutput + i + segsize * 1, dout2, number[1] * sizeof(float), cudaMemcpyDeviceToHost, s2);
}
cudaFree(d11); cudaFree(d21);
cudaFree(d12); cudaFree(d22);
cudaFree(dout1); cudaFree(dout2);
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
6a1f527e4928cf72049b969da274d9abe8dd1964.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
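  // Grid-stride loop over a 2-D grid of 1-D blocks: each thread starts at its global
  // linear id and advances by the total number of launched threads until all count
  // output elements have been processed.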
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 6a1f527e4928cf72049b969da274d9abe8dd1964.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
ROIPoolForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
ROIPoolBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
d16f01386b532bba1f118dc39af9923cd2b733da.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 8
#define BLOCK_SIZE 2
__global__ void matrixSum(float *d_a, float *d_b, float *d_c){
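    // Map the 2-D block/thread coordinates onto a linear index into the row-major N x N
    // matrices: (blockIdx.y, threadIdx.y) select the row and (blockIdx.x, threadIdx.x) the column.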
int globalIndex = blockIdx.y * BLOCK_SIZE * N + blockIdx.x * BLOCK_SIZE + threadIdx.y * N + threadIdx.x;
d_c[globalIndex] = d_a[globalIndex] + d_b[globalIndex];
}
int main(){
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int memSize = N * N * sizeof(float);
h_a = (float*) malloc(memSize);
h_b = (float*) malloc(memSize);
h_c = (float*) malloc(memSize);
hipMalloc((void**) &d_a, memSize);
hipMalloc((void**) &d_b, memSize);
hipMalloc((void**) &d_c, memSize);
for (int i = 0; i < N * N; ++i) {
h_a[i] = h_b[i] = 1.0f;
}
hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, memSize, hipMemcpyHostToDevice);
hipMemcpy(d_c, h_c, memSize, hipMemcpyHostToDevice);
dim3 block(N / BLOCK_SIZE, N / BLOCK_SIZE);
dim3 thread(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( matrixSum), dim3(block), dim3(thread) , 0, 0, d_a, d_b, d_c);
hipMemcpy(h_c, d_c, memSize, hipMemcpyDeviceToHost);
printf("El resultado es: \n");
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%f ", h_c[i]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
} | d16f01386b532bba1f118dc39af9923cd2b733da.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define N 8
#define BLOCK_SIZE 2
__global__ void matrixSum(float *d_a, float *d_b, float *d_c){
int globalIndex = blockIdx.y * BLOCK_SIZE * N + blockIdx.x * BLOCK_SIZE + threadIdx.y * N + threadIdx.x;
d_c[globalIndex] = d_a[globalIndex] + d_b[globalIndex];
}
int main(){
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int memSize = N * N * sizeof(float);
h_a = (float*) malloc(memSize);
h_b = (float*) malloc(memSize);
h_c = (float*) malloc(memSize);
cudaMalloc((void**) &d_a, memSize);
cudaMalloc((void**) &d_b, memSize);
cudaMalloc((void**) &d_c, memSize);
for (int i = 0; i < N * N; ++i) {
h_a[i] = h_b[i] = 1.0f;
}
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_c, memSize, cudaMemcpyHostToDevice);
dim3 block(N / BLOCK_SIZE, N / BLOCK_SIZE);
dim3 thread(BLOCK_SIZE, BLOCK_SIZE);
matrixSum<<< block, thread >>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, memSize, cudaMemcpyDeviceToHost);
printf("El resultado es: \n");
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%f ", h_c[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
4df638e2f08fd795f5c9efdbcb91852d244c4f3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. All rights reserved. */
/* For information, contact Frank Willmore: [email protected] */
/* */
/******************************************************************************/
#include <stdio.h>
__device__ char d_string[65536][256];
__global__ void toUpper()
{
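  // One block per input line and one thread per character column (256 threads per line);
  // lowercase ASCII letters are converted to uppercase in place.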
  if (d_string[blockIdx.x][threadIdx.x] >= 'a' && d_string[blockIdx.x][threadIdx.x] <= 'z')
    d_string[blockIdx.x][threadIdx.x] -= 'a' - 'A';
}
int main(int argc, char* argv[])
{
char line[65536][256];
int n_lines;
  n_lines = 0;
  while (n_lines < 65536 && fgets(&line[n_lines][0], 256, stdin)) n_lines++;  // feof() only becomes true after a failed read, so test fgets() directly
hipMemcpyToSymbol(d_string, line, sizeof(line), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( toUpper), dim3(n_lines), dim3(256) , 0, 0, );
hipMemcpyFromSymbol(line, d_string, sizeof(line), 0, hipMemcpyDeviceToHost);
for (int i=0; i<n_lines; i++) printf("%s", line[i]);
}
| 4df638e2f08fd795f5c9efdbcb91852d244c4f3c.cu | /******************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. All rights reserved. */
/* For information, contact Frank Willmore: [email protected] */
/* */
/******************************************************************************/
#include <stdio.h>
__device__ char d_string[65536][256];
__global__ void toUpper()
{
  if (d_string[blockIdx.x][threadIdx.x] >= 'a' && d_string[blockIdx.x][threadIdx.x] <= 'z')
    d_string[blockIdx.x][threadIdx.x] -= 'a' - 'A';
}
int main(int argc, char* argv[])
{
char line[65536][256];
int n_lines;
  n_lines = 0;
  while (n_lines < 65536 && fgets(&line[n_lines][0], 256, stdin)) n_lines++;  // feof() only becomes true after a failed read, so test fgets() directly
cudaMemcpyToSymbol(d_string, line, sizeof(line), 0, cudaMemcpyHostToDevice);
toUpper<<< n_lines, 256 >>>();
cudaMemcpyFromSymbol(line, d_string, sizeof(line), 0, cudaMemcpyDeviceToHost);
for (int i=0; i<n_lines; i++) printf("%s", line[i]);
}
|
47c852ad098ee89be7b658b95600b59609f2aa2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
/* Vector-matrix multiplication on the GPU.
   It multiplies a vector of 4 elements by a 4x4 matrix, so the expected result is a
   vector of 4 elements.
   The inputs are initialized on the host (CPU) and transferred to the device (GPU),
   the CPU launches the kernels that run on the GPU, and finally the result is
   transferred back from the GPU to the CPU and printed out. */
//old kernel
#define N 4
__global__ void dot(int *a, int *b, int *c) {
// Shared memory for results of multiplication
__shared__ int temp[N];
    temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
    __syncthreads();  // ensure all products are in shared memory before thread 0 reads them
    // Thread 0 sums the pairwise products
if (0 == threadIdx.x) {
int sum = 0;
for (int i = 0; i < N; i++)
sum += temp[i];
*c = sum;
}
}
__global__ void vector_matrix_mult(int *d_mat, int *d_out0, int *d_out1, int *d_out2, int *d_out3, const int M)
{
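    // Each thread handles one row of the 4x4 matrix and scatters it into four per-column
    // arrays, so d_out0..d_out3 end up holding columns 0..3. The dot() launches in main()
    // then compute vec . column_i, i.e. the elements of the vector-matrix product.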
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int tjd = threadIdx.y + blockIdx.y*blockDim.y;
if (tid < M) {
d_out0[tid] = d_mat[N*tid];
d_out1[tid] = d_mat[N*tid + 1];
d_out2[tid] = d_mat[N*tid + 2];
d_out3[tid] = d_mat[N*tid + 3];
}
}
int main(int argc, char ** argv) {
float elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
const int M = N;
const int ARRAY_SIZE = N;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int MATRIX_SIZE = ARRAY_SIZE*ARRAY_SIZE;
const int MATRIX_BYTES = MATRIX_SIZE * sizeof(int);
// generate the input vector and input matrix on the host
int *c0, *c1, *c2, *c3;
int *d_c0, *d_c1, *d_c2, *d_c3;
int *h_out0, *h_out1, *h_out2, *h_out3;
int *h_vec;
//allocate space for the variables on the host
h_out0 = (int *)malloc(ARRAY_BYTES);
h_out1 = (int *)malloc(ARRAY_BYTES);
h_out2 = (int *)malloc(ARRAY_BYTES);
h_out3 = (int *)malloc(ARRAY_BYTES);
h_vec = (int*)malloc(ARRAY_BYTES);
h_vec[0] = 0;
h_vec[1] = 0;
h_vec[2] = 0;
h_vec[3] = 1;
int h_mat[ARRAY_SIZE][ARRAY_SIZE] = { 2, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 };
c0 = (int *)malloc(sizeof(int));
c1 = (int *)malloc(sizeof(int));
c2 = (int *)malloc(sizeof(int));
c3 = (int *)malloc(sizeof(int));
*c0 = 0;
*c1 = 0;
*c2 = 0;
*c3 = 0;
//declare GPU memory pointers
int * d_vec;
int * d_mat;
int * d_out0;
int * d_out1;
int * d_out2;
int * d_out3;
//allocate GPU memory
hipMalloc((void **)&d_c0, sizeof(int));
hipMalloc((void **)&d_c1, sizeof(int));
hipMalloc((void **)&d_c2, sizeof(int));
hipMalloc((void **)&d_c3, sizeof(int));
hipMalloc((void**)&d_vec, ARRAY_BYTES);
hipMalloc((void**)&d_mat, MATRIX_BYTES);
hipMalloc((void**)&d_out0, ARRAY_BYTES);
hipMalloc((void**)&d_out1, ARRAY_BYTES);
hipMalloc((void**)&d_out2, ARRAY_BYTES);
hipMalloc((void**)&d_out3, ARRAY_BYTES);
//transfer the input from CPU to GPU
hipMemcpy(d_vec, h_vec, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out0, h_out0, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out1, h_out1, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out2, h_out2, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out3, h_out3, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_mat, h_mat, MATRIX_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_c0, c0, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c1, c1, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c2, c2, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c3, c3, sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
//launch the kernel
vector_matrix_mult << <1, ARRAY_SIZE >> > (d_mat, d_out0, d_out1, d_out2, d_out3, M);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out0, d_c0);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out1, d_c1);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out2, d_c2);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out3, d_c3);
hipEventRecord(stop, 0);
//trasfer the results from GPU to CPU
hipMemcpy(h_out0, d_out0, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(h_out1, d_out1, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(h_out2, d_out2, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(h_out3, d_out3, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(c0, d_c0, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(c1, d_c1, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(c2, d_c2, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(c3, d_c3, sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
//print out the resulting array
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out0[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out1[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out2[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out3[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
printf("\n""\n""\n");
printf("%d" "\t", *c0);
printf("%d" "\t", *c1);
printf("%d" "\t", *c2);
printf("%d" "\t", *c3);
printf("The GPU time elapsed is %.6f ms \"", elapsed);
//free GPU location memory
hipFree(d_vec);
hipFree(d_c0);
hipFree(d_c1);
hipFree(d_c2);
hipFree(d_c3);
hipFree(d_mat);
hipFree(d_out0);
hipFree(d_out1);
hipFree(d_out2);
hipFree(d_out3);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 47c852ad098ee89be7b658b95600b59609f2aa2d.cu | #include <stdio.h>
#include <iostream>
#include "cuda_runtime.h"
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <time.h>
#include <cuda_runtime_api.h>
/* Vector-matrix multiplication on the GPU.
   It multiplies a vector of 4 elements by a 4x4 matrix, so the expected result is a
   vector of 4 elements.
   The inputs are initialized on the host (CPU) and transferred to the device (GPU),
   the CPU launches the kernels that run on the GPU, and finally the result is
   transferred back from the GPU to the CPU and printed out. */
//old kernel
#define N 4
__global__ void dot(int *a, int *b, int *c) {
// Shared memory for results of multiplication
__shared__ int temp[N];
    temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
    __syncthreads();  // ensure all products are in shared memory before thread 0 reads them
    // Thread 0 sums the pairwise products
if (0 == threadIdx.x) {
int sum = 0;
for (int i = 0; i < N; i++)
sum += temp[i];
*c = sum;
}
}
__global__ void vector_matrix_mult(int *d_mat, int *d_out0, int *d_out1, int *d_out2, int *d_out3, const int M)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int tjd = threadIdx.y + blockIdx.y*blockDim.y;
if (tid < M) {
d_out0[tid] = d_mat[N*tid];
d_out1[tid] = d_mat[N*tid + 1];
d_out2[tid] = d_mat[N*tid + 2];
d_out3[tid] = d_mat[N*tid + 3];
}
}
int main(int argc, char ** argv) {
float elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
const int M = N;
const int ARRAY_SIZE = N;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int MATRIX_SIZE = ARRAY_SIZE*ARRAY_SIZE;
const int MATRIX_BYTES = MATRIX_SIZE * sizeof(int);
// generate the input vector and input matrix on the host
int *c0, *c1, *c2, *c3;
int *d_c0, *d_c1, *d_c2, *d_c3;
int *h_out0, *h_out1, *h_out2, *h_out3;
int *h_vec;
//allocate space for the variables on the host
h_out0 = (int *)malloc(ARRAY_BYTES);
h_out1 = (int *)malloc(ARRAY_BYTES);
h_out2 = (int *)malloc(ARRAY_BYTES);
h_out3 = (int *)malloc(ARRAY_BYTES);
h_vec = (int*)malloc(ARRAY_BYTES);
h_vec[0] = 0;
h_vec[1] = 0;
h_vec[2] = 0;
h_vec[3] = 1;
int h_mat[ARRAY_SIZE][ARRAY_SIZE] = { 2, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 };
c0 = (int *)malloc(sizeof(int));
c1 = (int *)malloc(sizeof(int));
c2 = (int *)malloc(sizeof(int));
c3 = (int *)malloc(sizeof(int));
*c0 = 0;
*c1 = 0;
*c2 = 0;
*c3 = 0;
//declare GPU memory pointers
int * d_vec;
int * d_mat;
int * d_out0;
int * d_out1;
int * d_out2;
int * d_out3;
//allocate GPU memory
cudaMalloc((void **)&d_c0, sizeof(int));
cudaMalloc((void **)&d_c1, sizeof(int));
cudaMalloc((void **)&d_c2, sizeof(int));
cudaMalloc((void **)&d_c3, sizeof(int));
cudaMalloc((void**)&d_vec, ARRAY_BYTES);
cudaMalloc((void**)&d_mat, MATRIX_BYTES);
cudaMalloc((void**)&d_out0, ARRAY_BYTES);
cudaMalloc((void**)&d_out1, ARRAY_BYTES);
cudaMalloc((void**)&d_out2, ARRAY_BYTES);
cudaMalloc((void**)&d_out3, ARRAY_BYTES);
//transfer the input from CPU to GPU
cudaMemcpy(d_vec, h_vec, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out0, h_out0, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out1, h_out1, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out2, h_out2, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out3, h_out3, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_mat, h_mat, MATRIX_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_c0, c0, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c1, c1, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c2, c2, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c3, c3, sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
//launch the kernel
vector_matrix_mult << <1, ARRAY_SIZE >> > (d_mat, d_out0, d_out1, d_out2, d_out3, M);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out0, d_c0);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out1, d_c1);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out2, d_c2);
dot << <1, ARRAY_SIZE >> > (d_vec, d_out3, d_c3);
cudaEventRecord(stop, 0);
//trasfer the results from GPU to CPU
cudaMemcpy(h_out0, d_out0, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_out1, d_out1, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_out2, d_out2, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_out3, d_out3, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(c0, d_c0, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(c1, d_c1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(c2, d_c2, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(c3, d_c3, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
//print out the resulting array
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out0[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out1[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out2[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%d", h_out3[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
printf("\n""\n""\n");
printf("%d" "\t", *c0);
printf("%d" "\t", *c1);
printf("%d" "\t", *c2);
printf("%d" "\t", *c3);
printf("The GPU time elapsed is %.6f ms \"", elapsed);
//free GPU location memory
cudaFree(d_vec);
cudaFree(d_c0);
cudaFree(d_c1);
cudaFree(d_c2);
cudaFree(d_c3);
cudaFree(d_mat);
cudaFree(d_out0);
cudaFree(d_out1);
cudaFree(d_out2);
cudaFree(d_out3);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
e92b9efd11e19c1c864ddae7f879d3898be6099b.hip | // !!! This is a file automatically generated by hipify!!!
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Eric Brandt, Asher Elmquist
// =============================================================================
//
// =============================================================================
#include <hip/hip_runtime.h>
#include "image_ops.cuh"
#include "chrono_sensor/optix/shaders/device_utils.h"
#include <iostream>
namespace chrono {
namespace sensor {
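// The blur below is a separable Gaussian: image_gauss_kernel_vert filters along the
// column (row-index) direction and image_gauss_kernel_horiz along the row direction.
// Each thread produces one byte (one channel of one pixel) and indices are mirrored
// at the image borders.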
__global__ void image_gauss_kernel_vert(unsigned char* buf, int w, int h, int c, int f_width, float* dweights) {
int index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
// only run for each output pixel
if (index < w * h * c) {
// float f_std = (float)f / 2.f;
// int f_width = (int)(2.f * 3.14f * f_std);
int channel = index % c;
int col = index / c % w;
int row = index / c / w;
float sum = 0;
for (int i = -f_width; i <= f_width; i++) {
int index_in = channel + col * c + abs(row + i) * w * c;
if (row + i >= h)
index_in = channel + col * c + (2 * h - (row + i + 1)) * w * c;
// float weight = exp(-i * i / (2 * f_std * f_std)) / sqrtf(2.f * 3.14f * f_std * f_std);
sum += dweights[i + f_width] * ((float)buf[index_in]);
// sum += ((float)buf[index_in]);
}
sum = fminf(255.f,fmaxf(0.f,sum));
buf[index] = (unsigned char)(sum);
}
}
__global__ void image_gauss_kernel_horiz(unsigned char* buf, int w, int h, int c, int f_width, float* dweights) {
int index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
// only run for each output pixel
if (index < w * h * c) {
int channel = index % c;
int col = index / c % w;
int row = index / c / w;
float sum = 0;
for (int i = -f_width; i <= f_width; i++) {
int index_in = channel + abs(col + i) * c + row * w * c;
if (col + i >= w)
index_in = channel + (2 * w - (col + i + 1)) * c + row * w * c;
sum += dweights[i + f_width] * ((float)buf[index_in]);
}
sum = fminf(255.f,fmaxf(0.f,sum));
buf[index] = (unsigned char)(sum);
}
}
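// The aliasing (downsample) kernel below averages, for each output pixel, a
// (factor + 2) x (factor + 2) input window -- the factor x factor block plus a
// one-pixel border -- reflecting indices at the image edges, and forces the
// alpha channel (channel 3) to 255.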
// merge pixels by the factor
__global__ void image_alias_kernel(unsigned char* bufIn,
unsigned char* bufOut,
int w_out,
int h_out,
int factor,
int pix_size) {
int out_index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
int w_in = w_out * factor;
int h_in = h_out * factor;
//
// // only run for each output pixel
if (out_index < w_out * h_out * pix_size) {
int idc_out = out_index % pix_size;
int idx_out = (out_index / pix_size) % w_out;
int idy_out = (out_index / pix_size) / w_out;
float mean = 0.0;
for (int i = -1; i < factor + 1; i++) {
for (int j = -1; j < factor + 1; j++) {
int idc_in = idc_out;
int idx_in = idx_out * factor + j;
int idy_in = idy_out * factor + i;
// reflect when out of range
if (idx_in < 0)
idx_in = -idx_in - 1;
else if (idx_in >= w_in)
idx_in = 2 * w_in - (idx_in + 1);
if (idy_in < 0)
idy_in = -idy_in - 1;
else if (idy_in >= h_in)
idy_in = 2 * h_in - (idy_in + 1);
int in_index = idy_in * w_in * pix_size + idx_in * pix_size + idc_in;
mean += (float)bufIn[in_index];
}
}
// bufOut[out_index] = (unsigned char)(mean / (factor * factor));
bufOut[out_index] = (unsigned char)(mean / ((factor + 2) * (factor + 2)));
if (idc_out == 3) {
bufOut[out_index] = 255;
}
// bufOut[out_index] = (unsigned char)(25 * idc_out);
}
}
// merge pixels by the factor
__global__ void image_alias_float_kernel(float* bufIn, float* bufOut, int w_out, int h_out, int factor, int pix_size) {
int out_index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
int w_in = w_out * factor;
//
// // only run for each output pixel
if (out_index < w_out * h_out * pix_size) {
int idc_out = out_index % pix_size;
int idx_out = (out_index / pix_size) % w_out;
int idy_out = (out_index / pix_size) / w_out;
float mean = 0.f;
for (int i = 0; i < factor; i++) {
for (int j = 0; j < factor; j++) {
int idc_in = idc_out;
int idx_in = idx_out * factor + j;
int idy_in = idy_out * factor + i;
int in_index = idy_in * w_in * pix_size + idx_in * pix_size + idc_in;
mean += bufIn[in_index];
}
}
bufOut[out_index] = mean / (factor * factor);
}
}
// merge pixels by the factor
__global__ void image_half4_to_uchar4_kernel(__half* bufIn, unsigned char* bufOut, int N) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
if (idx < N) {
bufOut[idx] = (unsigned char)(clamp(__half2float(bufIn[idx]), 0.f, 1.f) * 255.f);
}
}
void cuda_image_gauss_blur_char(void* buf, int w, int h, int c, int factor, hipStream_t& stream) {
const int nThreads = 512;
int nBlocks = (w * h * c + nThreads - 1) / nThreads;
float f_std = (float)factor / 4.f;
int f_width = (int)(3.14f * f_std);
int entries = 2 * f_width + 1;
float* weights = new float[entries];
for (int i = 0; i <= 2 * f_width; i++) {
int offset = i - f_width;
weights[i] = exp(-offset * offset / (2 * f_std * f_std)) / sqrtf(2.f * 3.14f * f_std * f_std);
}
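    // The taps sample a normal pdf with standard deviation f_std = factor / 4 over
    // [-f_width, f_width]; they are not renormalized, so their sum is only
    // approximately 1 (a reasonable assumption for the filter widths used here).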
float* dweights;
hipMalloc(&dweights, entries * sizeof(float));
hipMemcpy(dweights, weights, entries * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( image_gauss_kernel_vert), dim3(nBlocks), dim3(nThreads), 0, stream, (unsigned char*)buf, w, h, c, f_width, dweights);
hipLaunchKernelGGL(( image_gauss_kernel_horiz), dim3(nBlocks), dim3(nThreads), 0, stream, (unsigned char*)buf, w, h, c, f_width, dweights);
hipFree(dweights);
delete[] weights;
}
void cuda_image_alias(void* bufIn, void* bufOut, int w_out, int h_out, int factor, int pix_size, hipStream_t& stream) {
const int nThreads = 512;
int nBlocks = (w_out * h_out * pix_size + nThreads - 1) / nThreads;
hipLaunchKernelGGL(( image_alias_kernel), dim3(nBlocks), dim3(nThreads), 0, stream, (unsigned char*)bufIn, (unsigned char*)bufOut, w_out, h_out,
factor, pix_size);
}
void cuda_image_alias_float(void* bufIn,
void* bufOut,
int w_out,
int h_out,
int factor,
int pix_size,
hipStream_t& stream) {
const int nThreads = 512;
int nBlocks = (w_out * h_out * pix_size + nThreads - 1) / nThreads;
hipLaunchKernelGGL(( image_alias_float_kernel), dim3(nBlocks), dim3(nThreads), 0, stream, (float*)bufIn, (float*)bufOut, w_out, h_out, factor,
pix_size);
}
void cuda_image_half4_to_uchar4(void* bufIn, void* bufOut, int w, int h, hipStream_t& stream) {
const int nThreads = 512;
int nBlocks = (w * h * 4 + nThreads - 1) / nThreads;
hipLaunchKernelGGL(( image_half4_to_uchar4_kernel), dim3(nBlocks), dim3(nThreads), 0, stream, (__half*)bufIn, (unsigned char*)bufOut, w * h * 4);
}
} // namespace sensor
} // namespace chrono
| e92b9efd11e19c1c864ddae7f879d3898be6099b.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Eric Brandt, Asher Elmquist
// =============================================================================
//
// =============================================================================
#include <cuda.h>
#include "image_ops.cuh"
#include "chrono_sensor/optix/shaders/device_utils.h"
#include <iostream>
namespace chrono {
namespace sensor {
__global__ void image_gauss_kernel_vert(unsigned char* buf, int w, int h, int c, int f_width, float* dweights) {
int index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
// only run for each output pixel
if (index < w * h * c) {
// float f_std = (float)f / 2.f;
// int f_width = (int)(2.f * 3.14f * f_std);
int channel = index % c;
int col = index / c % w;
int row = index / c / w;
float sum = 0;
for (int i = -f_width; i <= f_width; i++) {
int index_in = channel + col * c + abs(row + i) * w * c;
if (row + i >= h)
index_in = channel + col * c + (2 * h - (row + i + 1)) * w * c;
// float weight = exp(-i * i / (2 * f_std * f_std)) / sqrtf(2.f * 3.14f * f_std * f_std);
sum += dweights[i + f_width] * ((float)buf[index_in]);
// sum += ((float)buf[index_in]);
}
sum = fminf(255.f,fmaxf(0.f,sum));
buf[index] = (unsigned char)(sum);
}
}
__global__ void image_gauss_kernel_horiz(unsigned char* buf, int w, int h, int c, int f_width, float* dweights) {
int index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
// only run for each output pixel
if (index < w * h * c) {
int channel = index % c;
int col = index / c % w;
int row = index / c / w;
float sum = 0;
for (int i = -f_width; i <= f_width; i++) {
int index_in = channel + abs(col + i) * c + row * w * c;
if (col + i >= w)
index_in = channel + (2 * w - (col + i + 1)) * c + row * w * c;
sum += dweights[i + f_width] * ((float)buf[index_in]);
}
sum = fminf(255.f,fmaxf(0.f,sum));
buf[index] = (unsigned char)(sum);
}
}
// merge pixels by the factor
__global__ void image_alias_kernel(unsigned char* bufIn,
unsigned char* bufOut,
int w_out,
int h_out,
int factor,
int pix_size) {
int out_index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
int w_in = w_out * factor;
int h_in = h_out * factor;
//
// // only run for each output pixel
if (out_index < w_out * h_out * pix_size) {
int idc_out = out_index % pix_size;
int idx_out = (out_index / pix_size) % w_out;
int idy_out = (out_index / pix_size) / w_out;
float mean = 0.0;
for (int i = -1; i < factor + 1; i++) {
for (int j = -1; j < factor + 1; j++) {
int idc_in = idc_out;
int idx_in = idx_out * factor + j;
int idy_in = idy_out * factor + i;
// reflect when out of range
if (idx_in < 0)
idx_in = -idx_in - 1;
else if (idx_in >= w_in)
idx_in = 2 * w_in - (idx_in + 1);
if (idy_in < 0)
idy_in = -idy_in - 1;
else if (idy_in >= h_in)
idy_in = 2 * h_in - (idy_in + 1);
int in_index = idy_in * w_in * pix_size + idx_in * pix_size + idc_in;
mean += (float)bufIn[in_index];
}
}
// bufOut[out_index] = (unsigned char)(mean / (factor * factor));
bufOut[out_index] = (unsigned char)(mean / ((factor + 2) * (factor + 2)));
if (idc_out == 3) {
bufOut[out_index] = 255;
}
// bufOut[out_index] = (unsigned char)(25 * idc_out);
}
}
// merge pixels by the factor
__global__ void image_alias_float_kernel(float* bufIn, float* bufOut, int w_out, int h_out, int factor, int pix_size) {
int out_index = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
int w_in = w_out * factor;
//
// // only run for each output pixel
if (out_index < w_out * h_out * pix_size) {
int idc_out = out_index % pix_size;
int idx_out = (out_index / pix_size) % w_out;
int idy_out = (out_index / pix_size) / w_out;
float mean = 0.f;
for (int i = 0; i < factor; i++) {
for (int j = 0; j < factor; j++) {
int idc_in = idc_out;
int idx_in = idx_out * factor + j;
int idy_in = idy_out * factor + i;
int in_index = idy_in * w_in * pix_size + idx_in * pix_size + idc_in;
mean += bufIn[in_index];
}
}
bufOut[out_index] = mean / (factor * factor);
}
}
// merge pixels by the factor
__global__ void image_half4_to_uchar4_kernel(__half* bufIn, unsigned char* bufOut, int N) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x); // index into output buffer
if (idx < N) {
bufOut[idx] = (unsigned char)(clamp(__half2float(bufIn[idx]), 0.f, 1.f) * 255.f);
}
}
void cuda_image_gauss_blur_char(void* buf, int w, int h, int c, int factor, CUstream& stream) {
const int nThreads = 512;
int nBlocks = (w * h * c + nThreads - 1) / nThreads;
float f_std = (float)factor / 4.f;
int f_width = (int)(3.14f * f_std);
int entries = 2 * f_width + 1;
float* weights = new float[entries];
for (int i = 0; i <= 2 * f_width; i++) {
int offset = i - f_width;
weights[i] = exp(-offset * offset / (2 * f_std * f_std)) / sqrtf(2.f * 3.14f * f_std * f_std);
}
float* dweights;
cudaMalloc(&dweights, entries * sizeof(float));
cudaMemcpy(dweights, weights, entries * sizeof(float), cudaMemcpyHostToDevice);
image_gauss_kernel_vert<<<nBlocks, nThreads, 0, stream>>>((unsigned char*)buf, w, h, c, f_width, dweights);
image_gauss_kernel_horiz<<<nBlocks, nThreads, 0, stream>>>((unsigned char*)buf, w, h, c, f_width, dweights);
cudaFree(dweights);
delete[] weights;
}
void cuda_image_alias(void* bufIn, void* bufOut, int w_out, int h_out, int factor, int pix_size, CUstream& stream) {
const int nThreads = 512;
int nBlocks = (w_out * h_out * pix_size + nThreads - 1) / nThreads;
image_alias_kernel<<<nBlocks, nThreads, 0, stream>>>((unsigned char*)bufIn, (unsigned char*)bufOut, w_out, h_out,
factor, pix_size);
}
void cuda_image_alias_float(void* bufIn,
void* bufOut,
int w_out,
int h_out,
int factor,
int pix_size,
CUstream& stream) {
const int nThreads = 512;
int nBlocks = (w_out * h_out * pix_size + nThreads - 1) / nThreads;
image_alias_float_kernel<<<nBlocks, nThreads, 0, stream>>>((float*)bufIn, (float*)bufOut, w_out, h_out, factor,
pix_size);
}
void cuda_image_half4_to_uchar4(void* bufIn, void* bufOut, int w, int h, CUstream& stream) {
const int nThreads = 512;
int nBlocks = (w * h * 4 + nThreads - 1) / nThreads;
image_half4_to_uchar4_kernel<<<nBlocks, nThreads, 0, stream>>>((__half*)bufIn, (unsigned char*)bufOut, w * h * 4);
}
} // namespace sensor
} // namespace chrono
|
1dae24e698fbbf2c3dd9d20ce168c80ebf4be937.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio> // printf is used below; <iostream> alone is not guaranteed to declare it
__global__ void kernel( void ) {
}
int main( void ) {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf( "Hello, World!\n" );
return 0;
}
| 1dae24e698fbbf2c3dd9d20ce168c80ebf4be937.cu | #include <iostream>
__global__ void kernel( void ) {
}
int main( void ) {
kernel<<<1,1>>>();
printf( "Hello, World!\n" );
return 0;
}
|
f0f6ebe1c2455239c4a71554ae4c24ce2c26b7e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, hipGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
#define BLOCKSIZE 32
__global__ void dgemm(double* A, double* B, double* C, int N)
{
size_t myRow = blockIdx.y*blockDim.y+threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x+threadIdx.x;
if (myRow < N && myCol < N)
for (size_t i = 0; i < N; i++)
C[myRow * N + myCol] += A[myRow * N + i] * B[i * N + myCol];
}
int main(int argc, char** argv)
{
double *A, *B, *C;
double *dA, *dB, *dC;
size_t N = 12288;
A = (double*) malloc (sizeof(double)*N*N);
B = (double*) malloc (sizeof(double)*N*N);
C = (double*) malloc (sizeof(double)*N*N);
for (size_t i = 0; i < N; i++)
for (size_t j = 0; j < N; j++)
{
A[i*N + j] = sin(i);
B[i*N + j] = cos(j);
}
hipMalloc(&dA, sizeof(double)*N*N); checkCUDAError("Error allocating dA");
hipMalloc(&dB, sizeof(double)*N*N); checkCUDAError("Error allocating dB");
hipMalloc(&dC, sizeof(double)*N*N); checkCUDAError("Error allocating dC");
hipMemcpy(dA, A, sizeof(double)*N*N, hipMemcpyHostToDevice); checkCUDAError("Error copying A");
hipMemcpy(dB, B, sizeof(double)*N*N, hipMemcpyHostToDevice); checkCUDAError("Error copying B");
dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE);
dim3 blocksPerGrid(N/BLOCKSIZE, N/BLOCKSIZE);
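  // N (12288) is an exact multiple of BLOCKSIZE (32), so this grid covers the matrix
  // exactly; for arbitrary N the usual (N + BLOCKSIZE - 1) / BLOCKSIZE rounding plus
  // the kernel's bounds check would be required.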
auto startTime = std::chrono::system_clock::now();
hipLaunchKernelGGL(( dgemm), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dA, dB, dC, N); checkCUDAError("Failed Kernel Launch");
hipDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
hipMemcpy(C, dC, sizeof(double)*N*N, hipMemcpyDeviceToHost);
double checkSum = 0.0;
for (size_t i = 0; i < N; i++)
for (size_t j = 0; j < N; j++)
checkSum += C[i*N + j];
printf("[GPU] Checksum: %f - Elapsed Time: %fs\n", checkSum, std::chrono::duration<double>(endTime-startTime).count());
return 0;
}
| f0f6ebe1c2455239c4a71554ae4c24ce2c26b7e4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
#define BLOCKSIZE 32
__global__ void dgemm(double* A, double* B, double* C, int N)
{
size_t myRow = blockIdx.y*blockDim.y+threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x+threadIdx.x;
if (myRow < N && myCol < N)
for (size_t i = 0; i < N; i++)
C[myRow * N + myCol] += A[myRow * N + i] * B[i * N + myCol];
}
int main(int argc, char** argv)
{
double *A, *B, *C;
double *dA, *dB, *dC;
size_t N = 12288;
A = (double*) malloc (sizeof(double)*N*N);
B = (double*) malloc (sizeof(double)*N*N);
C = (double*) malloc (sizeof(double)*N*N);
for (size_t i = 0; i < N; i++)
for (size_t j = 0; j < N; j++)
{
A[i*N + j] = sin(i);
B[i*N + j] = cos(j);
}
cudaMalloc(&dA, sizeof(double)*N*N); checkCUDAError("Error allocating dA");
cudaMalloc(&dB, sizeof(double)*N*N); checkCUDAError("Error allocating dB");
cudaMalloc(&dC, sizeof(double)*N*N); checkCUDAError("Error allocating dC");
cudaMemcpy(dA, A, sizeof(double)*N*N, cudaMemcpyHostToDevice); checkCUDAError("Error copying A");
cudaMemcpy(dB, B, sizeof(double)*N*N, cudaMemcpyHostToDevice); checkCUDAError("Error copying B");
dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE);
dim3 blocksPerGrid(N/BLOCKSIZE, N/BLOCKSIZE);
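  // N (12288) is an exact multiple of BLOCKSIZE (32), so this grid covers the matrix
  // exactly; for arbitrary N the usual (N + BLOCKSIZE - 1) / BLOCKSIZE rounding plus
  // the kernel's bounds check would be required.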
auto startTime = std::chrono::system_clock::now();
dgemm<<<blocksPerGrid,threadsPerBlock>>>(dA, dB, dC, N); checkCUDAError("Failed Kernel Launch");
cudaDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
cudaMemcpy(C, dC, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
double checkSum = 0.0;
for (size_t i = 0; i < N; i++)
for (size_t j = 0; j < N; j++)
checkSum += C[i*N + j];
printf("[GPU] Checksum: %f - Elapsed Time: %fs\n", checkSum, std::chrono::duration<double>(endTime-startTime).count());
return 0;
}
|
623bec37201afc751317cd5a3f4df49125af32d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixAddKernel3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *ans = NULL;
hipMalloc(&ans, XSIZE*YSIZE*sizeof(float));
float *M = NULL;
hipMalloc(&M, XSIZE*YSIZE*sizeof(float));
float *N = NULL;
hipMalloc(&N, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(matrixAddKernel3, dim3(gridBlock), dim3(threadBlock), 0, 0, ans, M, N, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(matrixAddKernel3, dim3(gridBlock), dim3(threadBlock), 0, 0, ans, M, N, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(matrixAddKernel3, dim3(gridBlock), dim3(threadBlock), 0, 0, ans, M, N, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 623bec37201afc751317cd5a3f4df49125af32d1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixAddKernel3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *ans = NULL;
cudaMalloc(&ans, XSIZE*YSIZE*sizeof(float));
float *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE*sizeof(float));
float *N = NULL;
cudaMalloc(&N, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixAddKernel3<<<gridBlock,threadBlock>>>(ans,M,N,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixAddKernel3<<<gridBlock,threadBlock>>>(ans,M,N,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixAddKernel3<<<gridBlock,threadBlock>>>(ans,M,N,size);
}
auto end = steady_clock::now();
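// Note: kernel launches are asynchronous; without a cudaDeviceSynchronize() before taking
// 'end', some of the 1000 launches may still be in flight, so this measures enqueue time
// plus whatever execution overlapped it rather than pure kernel time.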
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
33252075fddfccd061750a56e455374a4eaf83cd.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
int ind;
// move array pointers to correct position
// version 1
// ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
// version 2
ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
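  // Version 1 interleaves the samples so that consecutive threads read consecutive
  // elements on every step (coalesced); version 2 gives each thread a private
  // contiguous block of 2*N values, which is simpler to index but strides the
  // global-memory accesses across the warp.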
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n=0; n<N; n++) {
y1 = d_z[ind];
// version 1
// ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
y2 = rho*y1 + alpha*d_z[ind];
// version 1
// ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if ( fabs(s1-1.0f)<0.1f && fabs(s2-1.0f)<0.1f ) payoff = exp(-r*T);
d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
int NPATH=960000, h_N=100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
checkCudaErrors( hipMalloc((void **)&d_v, sizeof(float)*NPATH) );
checkCudaErrors( hipMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f-h_rho*h_rho);
h_dt = 1.0f/h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
checkCudaErrors( hipMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
checkCudaErrors( hipMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
checkCudaErrors( hipMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
checkCudaErrors( hipMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
checkCudaErrors( hipMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
checkCudaErrors( hipMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
checkCudaErrors( hipMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
checkCudaErrors( hipMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
checkCudaErrors( hipMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
// random number generation
hipEventRecord(start);
hiprandGenerator_t gen;
checkCudaErrors( hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT) );
checkCudaErrors( hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
checkCudaErrors( hiprandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
milli, 2.0*h_N*NPATH/(0.001*milli));
// execute kernel and time it
hipEventRecord(start);
hipLaunchKernelGGL(( pathcalc), dim3(NPATH/64), dim3(64), 0, 0, d_z, d_v);
getLastCudaError("pathcalc execution failed\n");
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("Monte Carlo kernel execution time (ms): %f \n",milli);
// copy back results
checkCudaErrors( hipMemcpy(h_v, d_v, sizeof(float)*NPATH,
hipMemcpyDeviceToHost) );
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i=0; i<NPATH; i++) {
sum1 += h_v[i];
sum2 += h_v[i]*h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
// Tidy up library
checkCudaErrors( hiprandDestroyGenerator(gen) );
// Release memory and exit cleanly
free(h_v);
checkCudaErrors( hipFree(d_v) );
checkCudaErrors( hipFree(d_z) );
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
}
| 33252075fddfccd061750a56e455374a4eaf83cd.cu |
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
int ind;
// move array pointers to correct position
// version 1
// ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
// version 2
ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
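  // Version 1 interleaves the samples so that consecutive threads read consecutive
  // elements on every step (coalesced); version 2 gives each thread a private
  // contiguous block of 2*N values, which is simpler to index but strides the
  // global-memory accesses across the warp.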
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n=0; n<N; n++) {
y1 = d_z[ind];
// version 1
// ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
y2 = rho*y1 + alpha*d_z[ind];
// version 1
// ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if ( fabs(s1-1.0f)<0.1f && fabs(s2-1.0f)<0.1f ) payoff = exp(-r*T);
d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
int NPATH=960000, h_N=100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
checkCudaErrors( cudaMalloc((void **)&d_v, sizeof(float)*NPATH) );
checkCudaErrors( cudaMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f-h_rho*h_rho);
h_dt = 1.0f/h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
checkCudaErrors( cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
checkCudaErrors( cudaMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
checkCudaErrors( cudaMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
checkCudaErrors( cudaMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
checkCudaErrors( cudaMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
checkCudaErrors( cudaMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
checkCudaErrors( cudaMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
checkCudaErrors( cudaMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
checkCudaErrors( cudaMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
// random number generation
cudaEventRecord(start);
curandGenerator_t gen;
checkCudaErrors( curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT) );
checkCudaErrors( curandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
checkCudaErrors( curandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
milli, 2.0*h_N*NPATH/(0.001*milli));
// execute kernel and time it
cudaEventRecord(start);
pathcalc<<<NPATH/64, 64>>>(d_z, d_v);
getLastCudaError("pathcalc execution failed\n");
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("Monte Carlo kernel execution time (ms): %f \n",milli);
// copy back results
checkCudaErrors( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
cudaMemcpyDeviceToHost) );
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i=0; i<NPATH; i++) {
sum1 += h_v[i];
sum2 += h_v[i]*h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
// Tidy up library
checkCudaErrors( curandDestroyGenerator(gen) );
// Release memory and exit cleanly
free(h_v);
checkCudaErrors( cudaFree(d_v) );
checkCudaErrors( cudaFree(d_z) );
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
}
|
f1fe5cbd0b5f818f9a18e753d37f9f02538ce1d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
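/* The two kernels below are assumed to be launched as follows: histogram_kernel_fast
   first builds a shared-memory-privatized histogram of the input; counting_sort_kernel
   is then launched with a single block of at least (range + 1) threads and
   2 * (range + 1) * sizeof(int) bytes of dynamic shared memory. It performs a
   double-buffered Hillis-Steele scan of the histogram (the pout/pin buffers) to get
   each value's starting offset and writes the sorted runs directly. */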
__global__ void counting_sort_kernel(int *input_array, int *sorted_array, int *histogram, int *scan, int num_elements, int range)
{
extern __shared__ int temp[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int blockID = threadIdx.x;
int pout = 0, pin = 1;
int n = range + 1;
temp[blockID] = (blockID > 0) ? histogram[blockID - 1] : 0;
int offset;
for (offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout * n + blockID] = temp[pin * n + blockID];
if (blockID >= offset)
temp[pout * n + blockID] += temp[pin * n + blockID - offset];
}
__syncthreads();
scan[blockID] = temp[pout * n + blockID];
int j;
int start_idx = scan[threadID];
if (histogram[threadID] != 0)
for (j = 0; j < histogram[threadID]; j++)
sorted_array[start_idx + j] = threadID;
return;
}
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
extern __shared__ unsigned int s[];
if(threadIdx.x < histogram_size)
s[threadIdx.x] = 0;
__syncthreads();
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (offset < num_elements) {
atomicAdd(&s[input_data[offset]], 1);
offset += stride;
}
__syncthreads();
if(threadIdx.x < histogram_size)
atomicAdd(&(histogram[threadIdx.x]), s[threadIdx.x]);
} | f1fe5cbd0b5f818f9a18e753d37f9f02538ce1d6.cu | /* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
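/* The two kernels below are assumed to be launched as follows: histogram_kernel_fast
   first builds a shared-memory-privatized histogram of the input; counting_sort_kernel
   is then launched with a single block of at least (range + 1) threads and
   2 * (range + 1) * sizeof(int) bytes of dynamic shared memory. It performs a
   double-buffered Hillis-Steele scan of the histogram (the pout/pin buffers) to get
   each value's starting offset and writes the sorted runs directly. */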
__global__ void counting_sort_kernel(int *input_array, int *sorted_array, int *histogram, int *scan, int num_elements, int range)
{
extern __shared__ int temp[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int blockID = threadIdx.x;
int pout = 0, pin = 1;
int n = range + 1;
temp[blockID] = (blockID > 0) ? histogram[blockID - 1] : 0;
int offset;
for (offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout * n + blockID] = temp[pin * n + blockID];
if (blockID >= offset)
temp[pout * n + blockID] += temp[pin * n + blockID - offset];
}
__syncthreads();
scan[blockID] = temp[pout * n + blockID];
int j;
int start_idx = scan[threadID];
if (histogram[threadID] != 0)
for (j = 0; j < histogram[threadID]; j++)
sorted_array[start_idx + j] = threadID;
return;
}
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
extern __shared__ unsigned int s[];
if(threadIdx.x < histogram_size)
s[threadIdx.x] = 0;
__syncthreads();
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (offset < num_elements) {
atomicAdd(&s[input_data[offset]], 1);
offset += stride;
}
__syncthreads();
if(threadIdx.x < histogram_size)
atomicAdd(&(histogram[threadIdx.x]), s[threadIdx.x]);
} |
04b3a4a5f83d37da5f816162e8f5366a5ed1ff03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
*@version-0.1
*@author Liyuqian- [email protected]
*Intelligent and Distributed Computing Lab, School of Computer Science,
*Huazhong University of Science and Technology.
*Note: no individual, team, research institution, or commercial organization other
*than this lab may file patents on this algorithm or publish papers about it.
*Anyone may use, modify, extend, and distribute this algorithm.
*The user bears sole responsibility for losses caused by improper use.
*
* Usage notes:
* 1. The detailed dictionary layout is not public; contact the author with questions.
* 2. The dictionary data shipped with the code is tightly coupled to it; incorrect
*    modifications may break the segmentation.
* 3. The author accepts no responsibility for losses incurred while using this code.
* 4. Sufficient stack space is required before launching, e.g. 10240000 bytes.
* 5. Choose either the optimized or the non-optimized build; compiling both at once is not supported.
* 6. Each of the three segmentation modes can write to its own output directory.
* 7. For anything not covered here, contact the author.
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <string.h>
#include <locale.h>
#include "dLoadTrie.h"
#include "loadDocs.h"
#define WORD_SIZE 30
#define WWORD_NUM 15
#ifndef __GLOBALVAR__
#define __GLOBALVAR__
__device__ unsigned char d_Status[318609];//global variable
__device__ int d_Check[318608]; //global variable
__device__ unsigned int d_Base[318608]; //global variable
__device__ unsigned int d_CharsHash[65535];
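// These arrays appear to hold a double-array trie over UTF-16 code units:
// d_CharsHash maps a code unit to a compact index, a transition from state s on
// character c goes to t = d_Base[s] + d_CharsHash[c] and is valid when
// d_Check[t] == s (or -1), and d_Status[t] encodes '0' = no word,
// '2' = a word ends here and may continue, '3' = a word ends here and cannot continue.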
#endif
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
/************************************************************************/
/* Three word-segmentation implementations */
/************************************************************************/
/** Forward full-match segmentation: emit every dictionary word found in the text */
__device__ void gGetAllWords(unsigned short *w_chars,int posFrom,int posTo,unsigned short *output){
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int baseValue = 0;
int checkValue = 0;
for (; i <posTo; i++) {
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
i = start;
start++;
end = 0;
baseValue = 0;
break;
case '2':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
i = start;
start++;
end = 0;
baseValue = 0;
break;
}//end of switch
}//end of for
}
/** Forward maximum-match segmentation */
__device__ void gMaxFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output) {
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int tempEnd = 0;
int baseValue = 0;
int checkValue = 0;
bool hasEnd = false;
int wlen=posTo-posFrom;
for(;i<posTo;i++){
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
if (hasEnd) {
for(t=0;t<tempEnd;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
hasEnd = false;
baseValue = 0;
start = start + tempEnd ;
i = start-1;
tempEnd = 0;
end = 0;
break;
} else {
baseValue = 0;
tempEnd = 0;
i = start;
start++;
end = 0;
}
break;
case '2':
tempEnd = end;
hasEnd = true;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;//1
hasEnd = false;
baseValue = 0;
tempEnd = 0;
start = i ;
end = 0;
break;
}
if (i == wlen - 1) {
if (hasEnd) {
for(t=0;t<tempEnd;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
hasEnd = false;
baseValue = 0;
start = start + tempEnd;
i = start-1;
tempEnd = 0;
end = 0;
break;
}
}
}
}
/** Forward minimum-match segmentation */
__device__ void gMinFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output){
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int baseValue = 0;
int checkValue = 0;
for (; i < posTo; i++) {
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
baseValue = 0;
i = start;
start++;
end = 0;
break;
case '2':
for(t=0;t<end;t++) {
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
baseValue = 0;
start = i+1;
end = 0;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
baseValue = 0;
start = i+1;
end = 0;
break;
}
}
}
/**
 * Kernel entry point: batch word segmentation over a set of documents.
 * Each document maps to one block and is split evenly across that block's threads.
 */
__global__ void gBatchSearchKernel(HostDocs * inputDocs,HostDocsTotalTokens *outputTokens){
int bid=blockIdx.x; //block id = document id
int tid=blockIdx.x*blockDim.x+threadIdx.x;//global thread id
int docsize=inputDocs->DocStreamSize[bid];//size of this block's document
int average=docsize/blockDim.x;//characters per thread
int start=threadIdx.x*average;//start position (inclusive)
int end=start+average;//end position (exclusive)
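// Note: splitting the document into equal per-thread chunks means a word that straddles
// a chunk boundary is cut, and any tail characters beyond blockDim.x*average are never scanned.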
//gGetAllWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
//gMaxFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
gMinFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
}
/**test load doc*/
__global__ void testLoad(HostDocs * inputDocs,unsigned short * writeadoc){
for(int i=0;i<100000;i++)
writeadoc[i]=inputDocs->DocStream[1][i];
}
/**
 * This version launches one block per loaded document, with TREAD_PER_BLOCK threads per block.
 * Segmentation results are stored per thread: block_num * TREAD_PER_BLOCK output arrays,
 * each holding at most MAX_TOKEN_PER_THREAD==100 output characters.
 */
void runCUDADATrie(char * inputFold,char * outputFold){
// make double trie
if( h_initCUDADATrie())
printf("InitCUDADAtrie success.\n\n");
else
printf("*** initCUDADATrie failed!\n\n");
//load a batch of documents from inputFold; the test set must not exceed the batch limit DOC_BATCH_SIZE==96
HostDocs *hdocs = loadBatchDocs(inputFold);
printHostDocs("docs",hdocs);
printf("\nCopy docs to GPU...\n");
HostDocs *ddocs;
unsigned short **CPU_ARRAY;
CPU_ARRAY =(unsigned short **)malloc(sizeof(unsigned short*)*DOC_BATCH_SIZE);
memset(CPU_ARRAY,0,sizeof(unsigned short*)*DOC_BATCH_SIZE);
int docSize=0,docsNum=hdocs->DocCount;
for(int i=0;i<docsNum;i++){
docSize=hdocs->DocStreamSize[i];
cutilSafeCall( hipMalloc((void **)&CPU_ARRAY[i],sizeof(unsigned short)*docSize));
cutilSafeCall( hipMemset(CPU_ARRAY[i],0,sizeof(unsigned short)*(docSize)));
cutilSafeCall( hipMemcpy(CPU_ARRAY[i],hdocs->DocStream[i],sizeof(unsigned short)*docSize,hipMemcpyHostToDevice));
}
cutilSafeCall(hipMalloc( (void**)&ddocs,sizeof(HostDocs)));
cutilSafeCall(hipMemcpy(ddocs->DocStream,CPU_ARRAY,sizeof(unsigned short*)*DOC_BATCH_SIZE,hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(ddocs->DocStreamSize,hdocs->DocStreamSize,sizeof(unsigned short)*DOC_BATCH_SIZE,hipMemcpyHostToDevice));
printf("End of copy\n\n");
//printHostDocs("d_docs test",bdocs);
//host-side buffer that receives the kernel output
HostDocsTotalTokens *hDocAllTokens;
int tokensTotalMemSize=TOTAL_THREADS_NUM*MAX_TOKEN_PER_THREAD;//128*96*100
hDocAllTokens=(HostDocsTotalTokens*)malloc(sizeof(HostDocsTotalTokens));
hDocAllTokens->threadsNum=0;
memset(hDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize);
//device-side buffer for the kernel output
HostDocsTotalTokens *dDocAllTokens;
CUDA_SAFE_CALL(hipMalloc( (void**)&dDocAllTokens,sizeof(HostDocsTotalTokens)));
int tNum=docsNum*TREAD_PER_BLOCK;//2*128
cutilSafeCall(hipMemcpy( &dDocAllTokens->threadsNum,&tNum,sizeof(unsigned short),hipMemcpyHostToDevice));
cutilSafeCall(hipMemset( dDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize));
int blockNum=docsNum;//number of worker blocks (one per document)
int threadsPerBlock=TREAD_PER_BLOCK;//threads per block
dim3 dimBlock(threadsPerBlock,1,1);
dim3 dimGrid(blockNum,1);
printf("start kernel...\n");
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
//
/**test load code*/
/*
unsigned short *writeDoc;
size_t docMemSize=sizeof(unsigned short)*MAX_DOC_SIZE;
cutilSafeCall(hipMalloc((void**)&writeDoc,docMemSize));
cutilSafeCall(hipMemset(writeDoc,0,docMemSize));
unsigned short *readDoc;
readDoc=(unsigned short*)malloc(docMemSize);
memset(readDoc,0,docMemSize);
printf("init..\n");
for(int i=0;i<10;i++)
printf("%4d: %wc\n",i,readDoc[i]);
*/
hipLaunchKernelGGL(( gBatchSearchKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, ddocs,dDocAllTokens);
//testLoad<<<1,1>>>(ddocs,writeDoc);
cutilCheckMsg("Kernel execution failed\n");
hipDeviceSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("Kernel processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
printf("end of kernel\n");
//test load code
/*
cutilSafeCall(hipMemcpy(readDoc,writeDoc,docMemSize,hipMemcpyDeviceToHost));
printf("the contrent:\n");
for(int i=0;i<10;i++)
printf("%4d : %wc\n",i,readDoc[i]);
printf("%ws\n",readDoc);
*/
cutilSafeCall(hipMemcpy(hDocAllTokens,dDocAllTokens,sizeof(HostDocsTotalTokens),hipMemcpyDeviceToHost));
writeDocsTotalTokens("keneal docs total tokens: minWords",outputFold,hDocAllTokens);
//release resources
free(hdocs);
free(hDocAllTokens);
cutilSafeCall(hipFree(ddocs));
cutilSafeCall(hipFree(dDocAllTokens));
/*
cutilSafeCall(hipFree(d_Base));
cutilSafeCall(hipFree(d_Check));
cutilSafeCall(hipFree(d_Status));
cutilSafeCall(hipFree(d_CharsHash));
*/
}
/*
int main(int argc, char* argv[])
{
if(!InitCUDA()) {
return 0;
}
char *console="outputFiles/minWords_log_48_64.txt";
freopen(console,"w",stdout); //out.txt
time_t timep;
time (&timep);
printf("------------------------\n");
printf("%s\n",ctime(&timep));
char * inputFold="inputFiles/48/";
char * outputFold="outputFiles/minWords_48_64.txt";
runCUDADATrie(inputFold,outputFold);
time (&timep);
printf("%s\n",ctime(&timep));
printf("------------------------\n");
fclose(stdout);//
CUT_EXIT(argc, argv);
fclose(stdout);//
return 0;
}
*/ | 04b3a4a5f83d37da5f816162e8f5366a5ed1ff03.cu | /********************************************************************
*@version-0.1
*@author Liyuqian-李雨前 [email protected]
*华中科技大学计算机学院 智能与分布式计算实验室
*注意: 除华中科技大学计算机学院智能与分布式计算外,
*任何个人、团队、研究结构、商业单位等不能对本算法进行专利申请或者撰写
*本算法的论文。
*任何个人、团队、研究结构、商业单位都可以对本算法进行使用、修改、扩展、传播。
*使用本算法不当造成的损失,责任有使用者自行负责。
*
* 使用提示:
* 1。本词典详细构造没有公开,有疑问请与作者联系
* 2。随代码附带词典信息与代码具有很强的相关系性,任何不正确修改都可能导致
* 分词异常。
* 3。使用本代码过程中,带来的损失,作者一概不负责
* 4。在启动运行前,需要足够的堆栈空间,例如10240000bytes
* 5。优化版本和 非优化版本,在编译时,选择一个就行,不支持同时编译
* 6。三种分词可以选择对应不同的输出目录。
* 7。其他未尽描述的问题,请与作者联系。
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <string.h>
#include <locale.h>
#include "dLoadTrie.h"
#include "loadDocs.h"
#define WORD_SIZE 30
#define WWORD_NUM 15
#ifndef __GLOBALVAR__
#define __GLOBALVAR__
__device__ unsigned char d_Status[318609];//全局变量
__device__ int d_Check[318608]; //全局变量
__device__ unsigned int d_Base[318608]; //全局变量
__device__ unsigned int d_CharsHash[65535];
#endif
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
/************************************************************************/
/* 三种分词实现 */
/************************************************************************/
/**正向全匹配分词*/
__device__ void gGetAllWords(unsigned short *w_chars,int posFrom,int posTo,unsigned short *output){
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int baseValue = 0;
int checkValue = 0;
for (; i <posTo; i++) {
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
i = start;
start++;
end = 0;
baseValue = 0;
break;
case '2':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
i = start;
start++;
end = 0;
baseValue = 0;
break;
}//end of switch
}//end of for
}
/**正向最大匹配分词*/
__device__ void gMaxFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output) {
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int tempEnd = 0;
int baseValue = 0;
int checkValue = 0;
bool hasEnd = false;
int wlen=posTo-posFrom;
for(;i<posTo;i++){
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
if (hasEnd) {
for(t=0;t<tempEnd;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
hasEnd = false;
baseValue = 0;
start = start + tempEnd ;
i = start-1;
tempEnd = 0;
end = 0;
break;
} else {
baseValue = 0;
tempEnd = 0;
i = start;
start++;
end = 0;
}
break;
case '2':
tempEnd = end;
hasEnd = true;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;//代表字符1
hasEnd = false;
baseValue = 0;
tempEnd = 0;
start = i ;
end = 0;
break;
}
if (i == wlen - 1) {
if (hasEnd) {
for(t=0;t<tempEnd;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
hasEnd = false;
baseValue = 0;
start = start + tempEnd;
i = start-1;
tempEnd = 0;
end = 0;
break;
}
}
}
}
/**正向最小匹配分词*/
__device__ void gMinFrontWords(unsigned short * w_chars,int posFrom,int posTo,unsigned short * output){
int outputIndex=0;
int t=0,i=posFrom,start=posFrom,end=0,charHashCode=0;
unsigned char stats='0';
int baseValue = 0;
int checkValue = 0;
for (; i < posTo; i++) {
end++;
charHashCode = d_CharsHash[w_chars[i]];
if( charHashCode<1 ) stats='0';
else{
checkValue=baseValue;
baseValue = d_Base[checkValue] + charHashCode;
if (d_Check[baseValue] == checkValue || d_Check[baseValue] == -1)
stats= d_Status[baseValue];
else
stats='0';
}
switch (stats) {
case '0':
baseValue = 0;
i = start;
start++;
end = 0;
break;
case '2':
for(t=0;t<end;t++) {
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
baseValue = 0;
start = i+1;
end = 0;
break;
case '3':
for(t=0;t<end;t++){
output[outputIndex++]=w_chars[t+start];
}
output[outputIndex++]=49;
baseValue = 0;
start = i+1;
end = 0;
break;
}
}
}
/**内核入口函数
* 功能:进行批量文档分词
* 文档按线程数平均划分,每个文档对应一个block
*/
__global__ void gBatchSearchKernel(HostDocs * inputDocs,HostDocsTotalTokens *outputTokens){
int bid=blockIdx.x; //块全局id
int tid=blockIdx.x*blockDim.x+threadIdx.x;//线程全局id
int docsize=inputDocs->DocStreamSize[bid];//快对应文档大小
int average=docsize/blockDim.x;//每个线程数据
int start=threadIdx.x*average;//包括端点
int end=start+average;//不包括端点
//gGetAllWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
//gMaxFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
gMinFrontWords(inputDocs->DocStream[bid],start,end,outputTokens->ThreadsTokens[tid]);
}
/**test load doc*/
__global__ void testLoad(HostDocs * inputDocs,unsigned short * writeadoc){
for(int i=0;i<100000;i++)
writeadoc[i]=inputDocs->DocStream[1][i];
}
/**
这个版本功能:
根据加载的文档个数,启动相应的block数,每个block分配TREAD_PER_BLOCK线程
分词结果按照thread单位保存,即 block_num* TREAD_PER_BLOCK 个数组元素,
每个元素长度MAX_TOKEN_PER_THREAD==100 即每个线程分词结果最大100个中文字
*/
void runCUDADATrie(char * inputFold,char * outputFold){
// make double trie
if( h_initCUDADATrie())
printf("InitCUDADAtrie success.\n\n");
else
printf("*** initCUDADATrie failed!\n\n");
//load a batch of documents from inputFold; the test set must not exceed the batch limit DOC_BATCH_SIZE==96
HostDocs *hdocs = loadBatchDocs(inputFold);
printHostDocs("docs",hdocs);
printf("\nCopy docs to GPU...\n");
HostDocs *ddocs;
unsigned short **CPU_ARRAY;
CPU_ARRAY =(unsigned short **)malloc(sizeof(unsigned short*)*DOC_BATCH_SIZE);
memset(CPU_ARRAY,0,sizeof(unsigned short*)*DOC_BATCH_SIZE);
int docSize=0,docsNum=hdocs->DocCount;
for(int i=0;i<docsNum;i++){
docSize=hdocs->DocStreamSize[i];
cutilSafeCall( cudaMalloc((void **)&CPU_ARRAY[i],sizeof(unsigned short)*docSize));
cutilSafeCall( cudaMemset(CPU_ARRAY[i],0,sizeof(unsigned short)*(docSize)));
cutilSafeCall( cudaMemcpy(CPU_ARRAY[i],hdocs->DocStream[i],sizeof(unsigned short)*docSize,cudaMemcpyHostToDevice));
}
cutilSafeCall(cudaMalloc( (void**)&ddocs,sizeof(HostDocs)));
cutilSafeCall(cudaMemcpy(ddocs->DocStream,CPU_ARRAY,sizeof(unsigned short*)*DOC_BATCH_SIZE,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(ddocs->DocStreamSize,hdocs->DocStreamSize,sizeof(unsigned short)*DOC_BATCH_SIZE,cudaMemcpyHostToDevice));
printf("End of copy\n\n");
//printHostDocs("d_docs test",bdocs);
//host-side buffer that receives the kernel output
HostDocsTotalTokens *hDocAllTokens;
int tokensTotalMemSize=TOTAL_THREADS_NUM*MAX_TOKEN_PER_THREAD;//128*96*100
hDocAllTokens=(HostDocsTotalTokens*)malloc(sizeof(HostDocsTotalTokens));
hDocAllTokens->threadsNum=0;
memset(hDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize);
//device-side buffer for the kernel output
HostDocsTotalTokens *dDocAllTokens;
CUDA_SAFE_CALL(cudaMalloc( (void**)&dDocAllTokens,sizeof(HostDocsTotalTokens)));
int tNum=docsNum*TREAD_PER_BLOCK;//全部线程数目2*128
cutilSafeCall(cudaMemcpy( &dDocAllTokens->threadsNum,&tNum,sizeof(unsigned short),cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemset( dDocAllTokens->ThreadsTokens,0,sizeof(unsigned short)*tokensTotalMemSize));
int blockNum=docsNum;//number of worker blocks (one per document)
int threadsPerBlock=TREAD_PER_BLOCK;//threads per block
dim3 dimBlock(threadsPerBlock,1,1);
dim3 dimGrid(blockNum,1);
printf("start kernel...\n");
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
//工作内核
/**test load code*/
/*
unsigned short *writeDoc;
size_t docMemSize=sizeof(unsigned short)*MAX_DOC_SIZE;
cutilSafeCall(cudaMalloc((void**)&writeDoc,docMemSize));
cutilSafeCall(cudaMemset(writeDoc,0,docMemSize));
unsigned short *readDoc;
readDoc=(unsigned short*)malloc(docMemSize);
memset(readDoc,0,docMemSize);
printf("init..\n");
for(int i=0;i<10;i++)
printf("%4d: %wc\n",i,readDoc[i]);
*/
gBatchSearchKernel<<<dimGrid,dimBlock>>>(ddocs,dDocAllTokens);
//testLoad<<<1,1>>>(ddocs,writeDoc);
cutilCheckMsg("Kernel execution failed\n");
cudaThreadSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("Kernel processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
printf("end of kernel\n");
//test load code
/*
cutilSafeCall(cudaMemcpy(readDoc,writeDoc,docMemSize,cudaMemcpyDeviceToHost));
printf("the contrent:\n");
for(int i=0;i<10;i++)
printf("%4d : %wc\n",i,readDoc[i]);
printf("%ws\n",readDoc);
*/
cutilSafeCall(cudaMemcpy(hDocAllTokens,dDocAllTokens,sizeof(HostDocsTotalTokens),cudaMemcpyDeviceToHost));
writeDocsTotalTokens("keneal docs total tokens: minWords",outputFold,hDocAllTokens);
//release resources
free(hdocs);
free(hDocAllTokens);
cutilSafeCall(cudaFree(ddocs));
cutilSafeCall(cudaFree(dDocAllTokens));
/*
cutilSafeCall(cudaFree(d_Base));
cutilSafeCall(cudaFree(d_Check));
cutilSafeCall(cudaFree(d_Status));
cutilSafeCall(cudaFree(d_CharsHash));
*/
}
/*
int main(int argc, char* argv[])
{
if(!InitCUDA()) {
return 0;
}
char *console="outputFiles/minWords_log_48_64.txt";
freopen(console,"w",stdout); //输出重定向,输出数据将保存在out.txt文件中
time_t timep;
time (&timep);
printf("------------------------\n");
printf("%s\n",ctime(&timep));
char * inputFold="inputFiles/48/";
char * outputFold="outputFiles/minWords_48_64.txt";
runCUDADATrie(inputFold,outputFold);
time (&timep);
printf("%s\n",ctime(&timep));
printf("------------------------\n");
fclose(stdout);//关闭文件
CUT_EXIT(argc, argv);
fclose(stdout);//关闭文件
return 0;
}
*/ |
5aa0b8280edc697ac503c1c452e5a295aad0fd51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void bhsm_backward(
const float *wxy,
const float *x,
const float *w,
const int *ts,
const int *paths,
const float *codes,
const int *begins,
const float *gLoss,
const int n_in,
const int max_len,
const int n_ex,
float *gx,
float *gW
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n_ex * max_len) {
int idx = i / max_len;
int offset = i - idx * max_len;
int t = ts[idx];
int begin = begins[t];
int length = begins[t+1] - begin;
if (offset < length) {
int p = begin + offset;
int node = paths[p];
float g = -gLoss[0] * codes[p] / (1.0f + exp(wxy[i]));
int w_start = n_in * node;
int x_start = n_in * idx;
for (int j = 0; j < n_in; ++j) {
int w_i = w_start + j;
int x_i = x_start + j;
atomicAdd(gx + x_i, g * w[w_i]);
atomicAdd(gW + w_i, g * x[x_i]);
}
}
}
}
| 5aa0b8280edc697ac503c1c452e5a295aad0fd51.cu | extern "C"
__global__ void bhsm_backward(
const float *wxy,
const float *x,
const float *w,
const int *ts,
const int *paths,
const float *codes,
const int *begins,
const float *gLoss,
const int n_in,
const int max_len,
const int n_ex,
float *gx,
float *gW
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n_ex * max_len) {
int idx = i / max_len;
int offset = i - idx * max_len;
int t = ts[idx];
int begin = begins[t];
int length = begins[t+1] - begin;
if (offset < length) {
int p = begin + offset;
int node = paths[p];
float g = -gLoss[0] * codes[p] / (1.0f + exp(wxy[i]));
int w_start = n_in * node;
int x_start = n_in * idx;
for (int j = 0; j < n_in; ++j) {
int w_i = w_start + j;
int x_i = x_start + j;
atomicAdd(gx + x_i, g * w[w_i]);
atomicAdd(gW + w_i, g * x[x_i]);
}
}
}
}
|
d40da346717c67a230d94eb00e9be3b63147603b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#if TORCH_HIP_VERSION >= 10010
#include <cstring>
#include <vector>
#include "NvInfer.h"
#include "common/bertCommon.h"
#include "common/common.cuh"
#include "common/serialize.hpp"
#include "geluPlugin.h"
using namespace nvinfer1;
namespace bert
{
// constants for approximating the normal cdf
constexpr float A = 0.5f;
constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI)
constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI)
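// These constants fold the usual tanh approximation of GELU,
// GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
// into A = 0.5, B = sqrt(2/pi) and C = 0.044715 * sqrt(2/pi), so the kernels below
// can evaluate it as x * (A + A * tanh(x * (C * x * x + B))).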
template <typename T, unsigned TPB>
__global__ void geluKernel(const T a, const T b, const T c, int n, const T* input, T* output)
{
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n)
{
const T in = input[idx];
const T cdf = a + a * tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
int computeGelu(hipStream_t stream, int n, const float* input, float* output)
{
constexpr int blockSize = 256;
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( geluKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, n, input, output);
PLUGIN_CHECK(hipPeekAtLastError());
return 0;
}
int computeGelu(hipStream_t stream, int n, const half* input, half* output)
{
constexpr int blockSize = 256;
if (0 == (n & 1))
{
const int n2 = n / 2;
const int gridSize = (n2 + blockSize - 1) / blockSize;
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( geluKernel<half2, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A2, B2, C2, n2, input2, output2);
}
else
{
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( geluKernel<half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, n, input, output);
}
PLUGIN_CHECK(hipPeekAtLastError());
return 0;
}
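// When n is even the half inputs are reinterpreted as half2, so each thread applies the
// approximation to two packed values and roughly half as many threads are launched; an
// odd n falls back to the scalar half instantiation of geluKernel.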
template <typename T, int TPB>
__global__ void geluBiasKernel(const T a, const T b, const T c, T* output, const T* input, const T* bias, const int ld)
{
const int offset = blockIdx.x * ld;
for (int it = threadIdx.x; it < ld; it += TPB)
{
const int idx = it + offset;
const T in = input[idx] + bias[it];
const T cdf = a + a * tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
int computeGeluBias(
float* output, const float* input, const float* bias, const int ld, const int cols, hipStream_t stream)
{
hipLaunchKernelGGL(( geluBiasKernel<float, 256>), dim3(cols), dim3(256), 0, stream, A, B, C, output, input, bias, ld);
return hipPeekAtLastError();
}
int computeGeluBias(
half* output, const half* input, const half* bias, const int ld, const int cols, hipStream_t stream)
{
if (ld & 1)
{
hipLaunchKernelGGL(( geluBiasKernel<half, 256>), dim3(cols), dim3(256), 0, stream, A, B, C, output, input, bias, ld);
}
else
{
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const int ld2 = ld / 2;
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* bias2 = reinterpret_cast<const half2*>(bias);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( geluBiasKernel<half2, 256>), dim3(cols), dim3(256), 0, stream, A2, B2, C2, output2, input2, bias2, ld2);
}
return hipPeekAtLastError();
}
} // namespace bert
#endif // TORCH_HIP_VERSION >= 10010
| d40da346717c67a230d94eb00e9be3b63147603b.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#if CUDA_VERSION >= 10010
#include <cstring>
#include <vector>
#include "NvInfer.h"
#include "common/bertCommon.h"
#include "common/common.cuh"
#include "common/serialize.hpp"
#include "geluPlugin.h"
using namespace nvinfer1;
namespace bert
{
// constants for approximating the normal cdf
constexpr float A = 0.5f;
constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI)
constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI)
template <typename T, unsigned TPB>
__global__ void geluKernel(const T a, const T b, const T c, int n, const T* input, T* output)
{
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n)
{
const T in = input[idx];
const T cdf = a + a * tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
int computeGelu(cudaStream_t stream, int n, const float* input, float* output)
{
constexpr int blockSize = 256;
const int gridSize = (n + blockSize - 1) / blockSize;
geluKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, n, input, output);
PLUGIN_CHECK(cudaPeekAtLastError());
return 0;
}
int computeGelu(cudaStream_t stream, int n, const half* input, half* output)
{
constexpr int blockSize = 256;
if (0 == (n & 1))
{
const int n2 = n / 2;
const int gridSize = (n2 + blockSize - 1) / blockSize;
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
geluKernel<half2, blockSize><<<gridSize, blockSize, 0, stream>>>(A2, B2, C2, n2, input2, output2);
}
else
{
const int gridSize = (n + blockSize - 1) / blockSize;
geluKernel<half, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, n, input, output);
}
PLUGIN_CHECK(cudaPeekAtLastError());
return 0;
}
template <typename T, int TPB>
__global__ void geluBiasKernel(const T a, const T b, const T c, T* output, const T* input, const T* bias, const int ld)
{
const int offset = blockIdx.x * ld;
for (int it = threadIdx.x; it < ld; it += TPB)
{
const int idx = it + offset;
const T in = input[idx] + bias[it];
const T cdf = a + a * tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
int computeGeluBias(
float* output, const float* input, const float* bias, const int ld, const int cols, cudaStream_t stream)
{
geluBiasKernel<float, 256><<<cols, 256, 0, stream>>>(A, B, C, output, input, bias, ld);
return cudaPeekAtLastError();
}
int computeGeluBias(
half* output, const half* input, const half* bias, const int ld, const int cols, cudaStream_t stream)
{
if (ld & 1)
{
geluBiasKernel<half, 256><<<cols, 256, 0, stream>>>(A, B, C, output, input, bias, ld);
}
else
{
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const int ld2 = ld / 2;
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* bias2 = reinterpret_cast<const half2*>(bias);
half2* output2 = reinterpret_cast<half2*>(output);
geluBiasKernel<half2, 256><<<cols, 256, 0, stream>>>(A2, B2, C2, output2, input2, bias2, ld2);
}
return cudaPeekAtLastError();
}
} // namespace bert
#endif // CUDA_VERSION >= 10010
|
c9a879a34d326927c23ce59aa09484c7a926ecd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <hipcub/hipcub.hpp>
#include <launch_kernel.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
// template <typename Float, typename Gauge>
template <typename Gauge>
struct GaugePlaqArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
double *plaq;
double *plaq_h;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: dataOr(dataOr), plaq_h(static_cast<double*>(pinned_malloc(sizeof(double)))) {
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = 2;
}
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3];
/* if ((hipHostMalloc(&plaq_h, sizeof(double))) == hipErrorMemoryAllocation)
errorQuda ("Error allocating memory for plaquette.\n");
if ((hipMalloc(&plaq, sizeof(double))) == hipErrorMemoryAllocation)
errorQuda ("Error allocating memory for plaquette.\n");
*/
hipHostGetDevicePointer(&plaq, plaq_h, 0);
}
};
static __inline__ __device__ double atomicAdd(double *addr, double val)
{
double old=*addr, assumed;
do {
assumed = old;
old = __longlong_as_double( atomicCAS((unsigned long long int*)addr,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
} while( __double_as_longlong(assumed)!=__double_as_longlong(old) );
return old;
}
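// Standard software emulation of a double-precision atomicAdd: re-read the current value
// and retry a 64-bit atomicCAS that swaps in (assumed + val) until no other thread has
// modified the word in between. Presumably provided here by hand because a native double
// atomicAdd only exists on newer GPU architectures (compute capability >= 6.0 on CUDA).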
__device__ __host__ inline int linkIndex3(int x[], int dx[], const int X[4]) {
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
int idx = (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0]) >> 1;
return idx;
}
__device__ __host__ inline void getCoords3(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
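// getCoords3 inverts the even-odd (checkerboard) linearization used by linkIndex3:
// cb_index runs over the X[0]*X[1]*X[2]*X[3]/2 sites of one parity, and the last line
// restores the full x[0] so that (x[0]+x[1]+x[2]+x[3]) % 2 equals `parity`.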
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords3(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
double plaq = 0.;
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 4; nu++) {
Matrix<Cmplx,3> U1, U2, U3, U4, tmpM;
arg.dataOr.load((Float*)(U1.data),linkIndex3(x,dx,X), mu, parity);
dx[mu]++;
arg.dataOr.load((Float*)(U2.data),linkIndex3(x,dx,X), nu, 1-parity);
dx[mu]--;
dx[nu]++;
arg.dataOr.load((Float*)(U3.data),linkIndex3(x,dx,X), mu, 1-parity);
dx[nu]--;
arg.dataOr.load((Float*)(U4.data),linkIndex3(x,dx,X), nu, parity);
tmpM = U1 * U2;
tmpM = tmpM * conj(U3);
tmpM = tmpM * conj(U4);
plaq += getTrace(tmpM).x;
}
}
typedef hipcub::BlockReduce<double, blockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
double aggregate = BlockReduce(temp_storage).Sum(plaq);
if (threadIdx.x == 0) atomicAdd((double *) arg.plaq, aggregate);
}
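// Each thread sums the 6 plaquettes attached to its site (mu < nu over 4 directions);
// BlockReduce then combines the per-thread partial sums within the block, and only
// threadIdx.x == 0 adds the block total into the single global accumulator using the
// double-precision atomicAdd defined above.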
template<typename Float, typename Gauge>
class GaugePlaq : Tunable {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return sizeof(Float); }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { host_free(arg.plaq_h); }
void apply(const hipStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL(computePlaq, tp, stream, arg, Float, Gauge);
// hipMemcpy(arg.plaq_h, arg.plaq, sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#ifdef MULTI_GPU
comm_allreduce((double*) arg.plaq_h);
const int nNodes = comm_dim(0)*comm_dim(1)*comm_dim(2)*comm_dim(3);
((double *) arg.plaq_h)[0] /= 18.*(arg.threads*nNodes);
#else
((double *) arg.plaq_h)[0] /= 18.*arg.threads;
#endif
} else {
errorQuda("CPU not supported yet\n");
//computePlaqCPU(arg);
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return (1)*6*arg.threads; }
long long bytes() const { return (1)*6*arg.threads*sizeof(Float); } // Only correct if there is no link reconstruction
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, QudaFieldLocation location, Float &plq) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
hipDeviceSynchronize();
plq = ((double *) arg.plaq_h)[0];
}
template<typename Float>
Float plaquette(const GaugeField& data, QudaFieldLocation location) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
Float res;
if(data.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
plaquette(FloatNOrder<Float, 18, 2, 18>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
plaquette(FloatNOrder<Float, 18, 2, 12>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
plaquette(FloatNOrder<Float, 18, 2, 8>(data), data, location, res);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else if(data.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
plaquette(FloatNOrder<Float, 18, 4, 18>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
plaquette(FloatNOrder<Float, 18, 4, 12>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
plaquette(FloatNOrder<Float, 18, 4, 8>(data), data, location, res);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
return res;
}
#endif
double plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
if(data.Precision() == QUDA_HALF_PRECISION) {
errorQuda("Half precision not supported\n");
}
if (data.Precision() == QUDA_SINGLE_PRECISION) {
return plaquette<float> (data, location);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
return plaquette<double>(data, location);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
}
| c9a879a34d326927c23ce59aa09484c7a926ecd8.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <cub/cub.cuh>
#include <launch_kernel.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
// template <typename Float, typename Gauge>
template <typename Gauge>
struct GaugePlaqArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
double *plaq;
double *plaq_h;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: dataOr(dataOr), plaq_h(static_cast<double*>(pinned_malloc(sizeof(double)))) {
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = 2;
}
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3];
/* if ((cudaMallocHost(&plaq_h, sizeof(double))) == cudaErrorMemoryAllocation)
errorQuda ("Error allocating memory for plaquette.\n");
if ((cudaMalloc(&plaq, sizeof(double))) == cudaErrorMemoryAllocation)
errorQuda ("Error allocating memory for plaquette.\n");
*/
cudaHostGetDevicePointer(&plaq, plaq_h, 0);
}
};
static __inline__ __device__ double atomicAdd(double *addr, double val)
{
double old=*addr, assumed;
do {
assumed = old;
old = __longlong_as_double( atomicCAS((unsigned long long int*)addr,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
} while( __double_as_longlong(assumed)!=__double_as_longlong(old) );
return old;
}
__device__ __host__ inline int linkIndex3(int x[], int dx[], const int X[4]) {
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
int idx = (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0]) >> 1;
return idx;
}
__device__ __host__ inline void getCoords3(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords3(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
double plaq = 0.;
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 4; nu++) {
Matrix<Cmplx,3> U1, U2, U3, U4, tmpM;
arg.dataOr.load((Float*)(U1.data),linkIndex3(x,dx,X), mu, parity);
dx[mu]++;
arg.dataOr.load((Float*)(U2.data),linkIndex3(x,dx,X), nu, 1-parity);
dx[mu]--;
dx[nu]++;
arg.dataOr.load((Float*)(U3.data),linkIndex3(x,dx,X), mu, 1-parity);
dx[nu]--;
arg.dataOr.load((Float*)(U4.data),linkIndex3(x,dx,X), nu, parity);
tmpM = U1 * U2;
tmpM = tmpM * conj(U3);
tmpM = tmpM * conj(U4);
plaq += getTrace(tmpM).x;
}
}
typedef cub::BlockReduce<double, blockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
double aggregate = BlockReduce(temp_storage).Sum(plaq);
if (threadIdx.x == 0) atomicAdd((double *) arg.plaq, aggregate);
}
template<typename Float, typename Gauge>
class GaugePlaq : Tunable {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return sizeof(Float); }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { host_free(arg.plaq_h); }
void apply(const cudaStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL(computePlaq, tp, stream, arg, Float, Gauge);
// cudaMemcpy(arg.plaq_h, arg.plaq, sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#ifdef MULTI_GPU
comm_allreduce((double*) arg.plaq_h);
const int nNodes = comm_dim(0)*comm_dim(1)*comm_dim(2)*comm_dim(3);
((double *) arg.plaq_h)[0] /= 18.*(arg.threads*nNodes);
#else
((double *) arg.plaq_h)[0] /= 18.*arg.threads;
#endif
} else {
errorQuda("CPU not supported yet\n");
//computePlaqCPU(arg);
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return (1)*6*arg.threads; }
long long bytes() const { return (1)*6*arg.threads*sizeof(Float); } // Only correct if there is no link reconstruction
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, QudaFieldLocation location, Float &plq) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
cudaDeviceSynchronize();
plq = ((double *) arg.plaq_h)[0];
}
template<typename Float>
Float plaquette(const GaugeField& data, QudaFieldLocation location) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
Float res;
if(data.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
plaquette(FloatNOrder<Float, 18, 2, 18>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
plaquette(FloatNOrder<Float, 18, 2, 12>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
plaquette(FloatNOrder<Float, 18, 2, 8>(data), data, location, res);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else if(data.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
plaquette(FloatNOrder<Float, 18, 4, 18>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
plaquette(FloatNOrder<Float, 18, 4, 12>(data), data, location, res);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
plaquette(FloatNOrder<Float, 18, 4, 8>(data), data, location, res);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
return res;
}
#endif
double plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
if(data.Precision() == QUDA_HALF_PRECISION) {
errorQuda("Half precision not supported\n");
}
if (data.Precision() == QUDA_SINGLE_PRECISION) {
return plaquette<float> (data, location);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
return plaquette<double>(data, location);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
}
|
35280091ad828eea05641d57759ca437c44399a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
using namespace std;
int NUMBER_OF_ELEMENTS = 8;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS);
void sum( int* A, int* B, int* C, int n_el);
int main()
{
//allocate memory for host vectors
int* hostA = (int*)malloc(SIZE);
int* hostB = (int*)malloc(SIZE);
int* hostC = (int*)malloc(SIZE);
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i;
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
hostA[i] = rand();
hostB[i] = rand();
}
//allocate memory for device vectors
hipMalloc(&deviceA,SIZE);
hipMalloc(&deviceB,SIZE);
hipMalloc(&deviceC,SIZE);
//kernel function
hipMemcpy(deviceA,hostA,SIZE,hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,SIZE,hipMemcpyHostToDevice);
sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS);
hipMemcpy(hostC,deviceC,SIZE,hipMemcpyDeviceToHost);
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
double error = 0;
for(i = 0;i<NUMBER_OF_ELEMENTS;i++)
{
double diff = double((hostA[i]+hostB[i])-hostC[i]);
error+=diff;
cout<<"A+B = "<<hostA[i]+hostB[i]<<"\n";
cout<<"C = "<<hostC[i]<<"\n";
}
error = sqrt(error);
cout<<"error = "<<error<<endl;
delete[] hostA;
delete[] hostB;
delete[] hostC;
return hipDeviceSynchronize();
}
void sum( int* A, int* B, int* C, int n_el)
{
int threadsPerblock,blocksperGrid;
if(n_el<512)
{
threadsPerblock = n_el;
blocksperGrid = 1;
}
else
{
threadsPerblock = 512;
blocksperGrid = ceil(double(n_el)/double(threadsPerblock));
}
//now invoke kernel method
hipLaunchKernelGGL(( kernel_sum), dim3(blocksperGrid),dim3(threadsPerblock), 0, 0, A,B,C,n_el);
}
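// The block count above is ceil(n_el / 512) computed in floating point; an equivalent
// all-integer form would be (n_el + threadsPerblock - 1) / threadsPerblock. Either way,
// surplus threads in the last block are discarded by the bounds check in kernel_sum.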
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS)
{
//calculate unique thread index
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index<NUMBERofELEMENTS)
C[index] = A[index] + B[index];
} | 35280091ad828eea05641d57759ca437c44399a8.cu | #include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
using namespace std;
int NUMBER_OF_ELEMENTS = 8;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS);
void sum( int* A, int* B, int* C, int n_el);
int main()
{
//allocate memory for host vectors
int* hostA = (int*)malloc(SIZE);
int* hostB = (int*)malloc(SIZE);
int* hostC = (int*)malloc(SIZE);
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i;
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
hostA[i] = rand();
hostB[i] = rand();
}
//allocate memory for device vectors
cudaMalloc(&deviceA,SIZE);
cudaMalloc(&deviceB,SIZE);
cudaMalloc(&deviceC,SIZE);
//kernel function
cudaMemcpy(deviceA,hostA,SIZE,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,SIZE,cudaMemcpyHostToDevice);
sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS);
cudaMemcpy(hostC,deviceC,SIZE,cudaMemcpyDeviceToHost);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
double error = 0;
for(i = 0;i<NUMBER_OF_ELEMENTS;i++)
{
double diff = double((hostA[i]+hostB[i])-hostC[i]);
error+=diff;
cout<<"A+B = "<<hostA[i]+hostB[i]<<"\n";
cout<<"C = "<<hostC[i]<<"\n";
}
error = sqrt(error);
cout<<"error = "<<error<<endl;
delete[] hostA;
delete[] hostB;
delete[] hostC;
return cudaDeviceSynchronize();
}
void sum( int* A, int* B, int* C, int n_el)
{
int threadsPerblock,blocksperGrid;
if(n_el<512)
{
threadsPerblock = n_el;
blocksperGrid = 1;
}
else
{
threadsPerblock = 512;
blocksperGrid = ceil(double(n_el)/double(threadsPerblock));
}
//now invoke kernel method
kernel_sum<<<blocksperGrid,threadsPerblock>>>(A,B,C,n_el);
}
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS)
{
//calculate unique thread index
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index<NUMBERofELEMENTS)
C[index] = A[index] + B[index];
} |
f48ac5b8ba074a91769b945162520e045ce2b911.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2016 by Contributors
* \file lsoftmax.cu
* \brief LSoftmax from <Large-Margin Softmax Loss for Convolutional Neural Networks>
* \author luoyetx
*/
#include "./lsoftmax-inl.h"
namespace mshadow {
namespace cuda {
namespace {
// workspace variables
enum LSoftmaxTempSpaceType {kCost, kCosmt, kK, kSin2t, kFo, kCostM};
}
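// Each workspace row caches one per-example quantity filled in by LSoftmaxBackwardRequired
// and reused by the x- and w-gradient kernels: kCost = cos(theta), kCosmt = cos(m*theta),
// kK = the interval index k, kSin2t = sin^2(theta), kFo = dot(w_yi, x_i) and
// kCostM = cos(theta)^(margin-1).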
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
MSHADOW_XINLINE int LSPowOfMO(const int k) {
return 1 - ((k&0x01) << 1);
}
template<typename DType>
__global__ void LSCalcNorm(const Tensor<gpu, 2, DType> x,
Tensor<gpu, 1, DType> x_norm) {
const int n = x.size(0);
const int m = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
DType norm = 0;
for (int j = 0; j < m; ++j) {
norm += x[i][j] * x[i][j];
}
x_norm[i] = sqrt(norm);
}
}
template<typename DType>
__device__ int LSFindK(const DType *k_table, const int n, const DType cos_t) {
const DType eps = 1e-5;
for (int i = 0; i < n; ++i) {
if (((k_table[i+1] < cos_t) || (abs(k_table[i+1] - cos_t) < eps)) &&
((k_table[i] > cos_t) || (abs(k_table[i] - cos_t) < eps))) {
return i;
}
}
return 0;
}
template<typename DType>
__device__ DType LSCalcCosmt(const DType *c_table, const int n,
const DType cos_t, const int margin) {
const DType sin2_t = 1 - cos_t * cos_t;
DType cos_t_p = pow(cos_t, margin);
DType sin2_t_p = 1;
DType cos_mt = cos_t_p; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t; // don't replace `cos_t*cos_t` with `1-sin2_t`, this can cause numeric issue if cos_t --> 0
sin2_t_p *= sin2_t;
cos_mt += LSPowOfMO(p) * c_table[2*p] * cos_t_p * sin2_t_p;
}
return cos_mt;
}
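// LSCalcCosmt evaluates the multiple-angle expansion used by L-Softmax,
// cos(m*t) = sum_{p=0..m/2} (-1)^p * C(m, 2p) * cos(t)^(m-2p) * sin(t)^(2p),
// keeping cos(t)^(m-2p) and (sin^2 t)^p as running products; c_table is assumed to hold
// the binomial coefficients C(m, j) precomputed on the host side.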
template<typename DType>
__global__ void LSoftmaxForwardKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> out,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
const DType fo_i_yi = out[i][yi];
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType f_i_yi = (LSPowOfMO(k) * cos_mt - 2*k) * (w_norm[yi] * x_norm[i]);
out[i][yi] = (f_i_yi + beta * fo_i_yi) / (1 + beta);
}
}
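// The target-class logit is replaced by a blend of the large-margin value
// f_i_yi = ||w_yi|| * ||x_i|| * ((-1)^k * cos(m*theta) - 2k) and the original logit
// fo_i_yi, weighted as (f + beta * fo) / (1 + beta); a large beta keeps the output close
// to plain softmax, and beta is presumably annealed toward zero over training as in the paper.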
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSCalcNorm), dim3(dimGrid), dim3(dimBlock), 0, 0, x, x_norm);
dimGrid.x = ((m + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSCalcNorm), dim3(dimGrid), dim3(dimBlock), 0, 0, w, w_norm);
dimGrid.x = ((n + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSoftmaxForwardKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, x_norm, w_norm, out, k_table, c_table, margin, beta);
}
template<typename DType>
__global__ void LSoftmaxBackwardRequired(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin) {
const int n = x.size(0);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
// fo_i_yi = dot(w_yi, x_i)
DType fo_i_yi = 0;
for (int p = 0; p < feature_dim; ++p) {
fo_i_yi += w[yi][p] * x[i][p];
}
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType sin2_t = 1 - cos_t * cos_t;
workspace[kCost][i] = cos_t;
workspace[kCosmt][i] = cos_mt;
workspace[kK][i] = static_cast<DType>(k);
workspace[kSin2t][i] = sin2_t;
workspace[kFo][i] = fo_i_yi;
workspace[kCostM][i] = pow(cos_t, margin - 1);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardXKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> x_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = x.size(0) * x.size(1);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int i = idx / feature_dim;
const int l = idx % feature_dim;
const int yi = static_cast<int>(label[i]);
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType w_norm_yi = w_norm[yi];
const DType x_norm_i = x_norm[i];
const DType dcos_dx = w[yi][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * x[i][l] / (w_norm_yi * x_norm_i * x_norm_i * x_norm_i);
const DType dsin2_dx = -2 * cos_t * dcos_dx;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dx = margin * cos_t_p * dcos_dx; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dx += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dx + \
(margin - 2*p) * sin2_t * dcos_dx) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dx = (LSPowOfMO(k) * cos_mt - 2*k) * w_norm_yi / x_norm_i * x[i][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dx;
const DType alpha = 1 / (1 + beta);
x_grad[i][l] += alpha * o_grad[i][yi] * (df_dx - w[yi][l]);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardWKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> w_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = w.size(0) * w.size(1);
const int n = x.size(0);
const int feature_dim = w.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int j = idx / feature_dim;
const int l = idx % feature_dim;
DType dw = 0;
for (int i = 0; i < n; ++i) {
const int yi = static_cast<int>(label[i]);
if (yi == j) {
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType x_norm_i = x_norm[i];
const DType w_norm_yi = w_norm[yi];
const DType dcos_dw = x[i][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * w[yi][l] / (x_norm_i * w_norm_yi * w_norm_yi * w_norm_yi);
const DType dsin2_dw = -2 * cos_t * dcos_dw;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dw = margin * cos_t_p * dcos_dw; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dw += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dw + \
(margin - 2*p) * sin2_t * dcos_dw) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dw_j = (LSPowOfMO(k) * cos_mt - 2*k) * x_norm_i / w_norm_yi * w[yi][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dw;
dw += o_grad[i][yi] * (df_dw_j - x[i][l]);
}
}
const DType alpha = 1 / (1 + beta);
w_grad[j][l] += alpha * dw;
}
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSoftmaxBackwardRequired), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, x_norm, w_norm, workspace,
k_table, c_table, margin);
dimGrid.x = ((n * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSoftmaxBackwardXKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, x_norm, w_norm, o_grad, x_grad, workspace,
c_table, margin, beta);
dimGrid.x = ((m * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
hipLaunchKernelGGL(( LSoftmaxBackwardWKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, x_norm, w_norm, o_grad, w_grad, workspace,
c_table, margin, beta);
}
} // namespace cuda
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxForward(x, w, label, out, x_norm, w_norm,
k_table, c_table, margin, beta);
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxBackward(x, w, label, x_norm, w_norm, o_grad, x_grad, w_grad, workspace,
k_table, c_table, margin, beta);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(LSoftmaxParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new LSoftmaxOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| f48ac5b8ba074a91769b945162520e045ce2b911.cu | /*!
* Copyright (c) 2016 by Contributors
* \file lsoftmax.cu
* \brief LSoftmax from <Large-Margin Softmax Loss for Convolutional Neural Networks>
* \author luoyetx
*/
#include "./lsoftmax-inl.h"
namespace mshadow {
namespace cuda {
namespace {
// workspace variables
enum LSoftmaxTempSpaceType {kCost, kCosmt, kK, kSin2t, kFo, kCostM};
}
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
MSHADOW_XINLINE int LSPowOfMO(const int k) {
return 1 - ((k&0x01) << 1);
}
template<typename DType>
__global__ void LSCalcNorm(const Tensor<gpu, 2, DType> x,
Tensor<gpu, 1, DType> x_norm) {
const int n = x.size(0);
const int m = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
DType norm = 0;
for (int j = 0; j < m; ++j) {
norm += x[i][j] * x[i][j];
}
x_norm[i] = sqrt(norm);
}
}
template<typename DType>
__device__ int LSFindK(const DType *k_table, const int n, const DType cos_t) {
const DType eps = 1e-5;
for (int i = 0; i < n; ++i) {
if (((k_table[i+1] < cos_t) || (abs(k_table[i+1] - cos_t) < eps)) &&
((k_table[i] > cos_t) || (abs(k_table[i] - cos_t) < eps))) {
return i;
}
}
return 0;
}
template<typename DType>
__device__ DType LSCalcCosmt(const DType *c_table, const int n,
const DType cos_t, const int margin) {
const DType sin2_t = 1 - cos_t * cos_t;
DType cos_t_p = pow(cos_t, margin);
DType sin2_t_p = 1;
DType cos_mt = cos_t_p; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t; // don't replace `cos_t*cos_t` with `1-sin2_t`, this can cause numeric issue if cos_t --> 0
sin2_t_p *= sin2_t;
cos_mt += LSPowOfMO(p) * c_table[2*p] * cos_t_p * sin2_t_p;
}
return cos_mt;
}
template<typename DType>
__global__ void LSoftmaxForwardKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> out,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
const DType fo_i_yi = out[i][yi];
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType f_i_yi = (LSPowOfMO(k) * cos_mt - 2*k) * (w_norm[yi] * x_norm[i]);
out[i][yi] = (f_i_yi + beta * fo_i_yi) / (1 + beta);
}
}
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSCalcNorm<<<dimGrid, dimBlock>>>(x, x_norm);
dimGrid.x = ((m + kBaseThreadNum - 1) / kBaseThreadNum);
LSCalcNorm<<<dimGrid, dimBlock>>>(w, w_norm);
dimGrid.x = ((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxForwardKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, out, k_table, c_table, margin, beta);
}
template<typename DType>
__global__ void LSoftmaxBackwardRequired(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin) {
const int n = x.size(0);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
// fo_i_yi = dot(w_yi, x_i)
DType fo_i_yi = 0;
for (int p = 0; p < feature_dim; ++p) {
fo_i_yi += w[yi][p] * x[i][p];
}
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType sin2_t = 1 - cos_t * cos_t;
workspace[kCost][i] = cos_t;
workspace[kCosmt][i] = cos_mt;
workspace[kK][i] = static_cast<DType>(k);
workspace[kSin2t][i] = sin2_t;
workspace[kFo][i] = fo_i_yi;
workspace[kCostM][i] = pow(cos_t, margin - 1);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardXKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> x_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = x.size(0) * x.size(1);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int i = idx / feature_dim;
const int l = idx % feature_dim;
const int yi = static_cast<int>(label[i]);
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType w_norm_yi = w_norm[yi];
const DType x_norm_i = x_norm[i];
const DType dcos_dx = w[yi][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * x[i][l] / (w_norm_yi * x_norm_i * x_norm_i * x_norm_i);
const DType dsin2_dx = -2 * cos_t * dcos_dx;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dx = margin * cos_t_p * dcos_dx; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dx += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dx + \
(margin - 2*p) * sin2_t * dcos_dx) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dx = (LSPowOfMO(k) * cos_mt - 2*k) * w_norm_yi / x_norm_i * x[i][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dx;
const DType alpha = 1 / (1 + beta);
x_grad[i][l] += alpha * o_grad[i][yi] * (df_dx - w[yi][l]);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardWKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> w_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = w.size(0) * w.size(1);
const int n = x.size(0);
const int feature_dim = w.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int j = idx / feature_dim;
const int l = idx % feature_dim;
DType dw = 0;
for (int i = 0; i < n; ++i) {
const int yi = static_cast<int>(label[i]);
if (yi == j) {
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType x_norm_i = x_norm[i];
const DType w_norm_yi = w_norm[yi];
const DType dcos_dw = x[i][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * w[yi][l] / (x_norm_i * w_norm_yi * w_norm_yi * w_norm_yi);
const DType dsin2_dw = -2 * cos_t * dcos_dw;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dw = margin * cos_t_p * dcos_dw; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dw += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dw + \
(margin - 2*p) * sin2_t * dcos_dw) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dw_j = (LSPowOfMO(k) * cos_mt - 2*k) * x_norm_i / w_norm_yi * w[yi][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dw;
dw += o_grad[i][yi] * (df_dw_j - x[i][l]);
}
}
const DType alpha = 1 / (1 + beta);
w_grad[j][l] += alpha * dw;
}
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardRequired<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, workspace,
k_table, c_table, margin);
dimGrid.x = ((n * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardXKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, o_grad, x_grad, workspace,
c_table, margin, beta);
dimGrid.x = ((m * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardWKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, o_grad, w_grad, workspace,
c_table, margin, beta);
}
} // namespace cuda
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxForward(x, w, label, out, x_norm, w_norm,
k_table, c_table, margin, beta);
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxBackward(x, w, label, x_norm, w_norm, o_grad, x_grad, w_grad, workspace,
k_table, c_table, margin, beta);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(LSoftmaxParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new LSoftmaxOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
f2d9f29bbbb021c3c0fe73a109c473481361ca9e.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <Windows.h>
/* After checking in many different forums, we have discovered that for some reason
Visual Studio Intellisense does not recognize the <<< >>> nomenclature, so we
have been forced to implement a macro to change that for KERNEL_ARGS2 and so
as it can be seen in the following lines.
We know that the right way is to directly use the <<< >>> but we had no option.
Hope you understand :)
*/
#ifdef __HIPCC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#define TPB 1024
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
struct Particle
{
float3 position;
float3 velocity;
};
__global__ void kernelParticlesUpdate(int len, Particle* d_particleArray)
{
// We calculate the id with the general way although is just one block of threads
// and could be easily obtained with threadIdx.x
const int i = blockIdx.x * blockDim.x + threadIdx.x;
float dt = 1;
if (i < len)
{
// Velocity update
d_particleArray[i].velocity.x *= 0.5 * i;
d_particleArray[i].velocity.y *= 2 * i;
d_particleArray[i].velocity.z *= 0.75 * i;
// Position update
d_particleArray[i].position.x += d_particleArray[i].velocity.x * dt;
d_particleArray[i].position.y += d_particleArray[i].velocity.y * dt;
d_particleArray[i].position.z += d_particleArray[i].velocity.z * dt;
}
}
void cpuParticlesUpdate(Particle* particleArray)
{
int i;
for (i = 0; i < NUM_PARTICLES; i++)
{
float dt = 1;
// Velocity update
particleArray[i].velocity.x *= (float)0.5 * i;
particleArray[i].velocity.y *= (float)2 * i;
particleArray[i].velocity.z *= (float)0.75 * i;
// Position update
particleArray[i].position.x += particleArray[i].velocity.x * dt;
particleArray[i].position.y += particleArray[i].velocity.y * dt;
particleArray[i].position.z += particleArray[i].velocity.z * dt;
}
}
int gettimeofday(struct timeval* tv, struct timezone* tz)
{
static LONGLONG birthunixhnsec = 116444736000000000; /*in units of 100 ns */
FILETIME systemtime;
GetSystemTimeAsFileTime(&systemtime);
ULARGE_INTEGER utime;
utime.LowPart = systemtime.dwLowDateTime;
utime.HighPart = systemtime.dwHighDateTime;
ULARGE_INTEGER birthunix;
birthunix.LowPart = (DWORD)birthunixhnsec;
birthunix.HighPart = birthunixhnsec >> 32;
LONGLONG usecs;
usecs = (LONGLONG)((utime.QuadPart - birthunix.QuadPart) / 10);
tv->tv_sec = (long long)(usecs / 1000000);
tv->tv_usec = (long long)(usecs % 1000000);
return 0;
}
bool compareArrays(Particle* d_particleArrayRes, Particle* particleArray)
{
int i;
for (i = 0; i < NUM_PARTICLES; i++)
{
if (d_particleArrayRes[i].velocity.x != particleArray[i].velocity.x ||
d_particleArrayRes[i].velocity.y != particleArray[i].velocity.y ||
d_particleArrayRes[i].velocity.z != particleArray[i].velocity.z ||
d_particleArrayRes[i].position.x != particleArray[i].position.x ||
d_particleArrayRes[i].position.y != particleArray[i].position.y ||
d_particleArrayRes[i].position.z != particleArray[i].position.z)
return false;
}
return true;
}
int main()
{
printf("\nLets run exercise 3! \n\n");
printf("\nNUM_PARTICLES: %d ", NUM_PARTICLES);
printf("\nNUM_ITERATIONS: %d ", NUM_ITERATIONS);
printf("\nThreads per block: %d ", TPB);
printf("\nNumber of thread blocks: %d \n\n", (NUM_PARTICLES + TPB - 1) / TPB);
//Particle* d_particleArray;
Particle* particleArray;
// We ask for pinned memory allocation
    if (hipMallocManaged(&particleArray, NUM_PARTICLES * sizeof(Particle)) != hipSuccess)
{
// We check for errors after requesting CUDA unified memory allocation
printf("\n\nERROR 1! fail when allocating CUDA unified memory!\n\n");
return 1;
}
for (int i = 0; i < NUM_PARTICLES; i++)
{
particleArray[i].position = make_float3(rand(), rand(), rand());
particleArray[i].velocity = make_float3(rand(), rand(), rand());
}
// Let's measure the time
struct timeval tStart;
struct timeval tEnd;
printf("Computing GPU Particles Update... \n");
// We start the timer
gettimeofday(&tStart, NULL);
// Launch kernel to print hello worlds with Ids
for (int i = 0; i < NUM_ITERATIONS; i++)
{
// Copy the particles data from the CPU into the GPU pinned memory (NOT WITH MANAGED UNIFIED MEMORY)
//hipMemcpy(d_particleArray, particleArray, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice);
// Then, process the particles data in the GPU
kernelParticlesUpdate KERNEL_ARGS2((NUM_PARTICLES + TPB - 1) / TPB, TPB)(NUM_PARTICLES, particleArray);
hipDeviceSynchronize();
// Copy the particles data from the GPU into the CPU (NOT WITH MANAGED UNIFIED MEMORY)
//hipMemcpy(particleArray, d_particleArray, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost);
}
// We wait for the GPU
hipDeviceSynchronize();
// We stop the timer...
gettimeofday(&tEnd, NULL);
// And finally print the timer
printf("GPU Particles Update completed in %3.10f miliseconds \n", ((tEnd.tv_sec - tStart.tv_sec) * 1000000.0 + (tEnd.tv_usec - tStart.tv_usec)) / 1000.0);
//Particle* d_particleArrayRes = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//if (d_particleArrayRes == NULL) { printf("\n\nERROR 2! fail when allocating cuda dynamic memory!\n\n"); return 1; }
//hipMemcpy(d_particleArrayRes, d_particleArray, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost);
// We comment this since we are not interested in CPU calculating it anymore
/*
// Now let's go with the CPU Saxpy
printf("\nComputing CPU Particles Update... \n");
// We restart the timer
gettimeofday(&tStart, NULL);
// The CPU Saxpy is computed
for (int i = 0; i < NUM_ITERATIONS; i++)
{
cpuParticlesUpdate(particleArray);
}
// And then we stop the timer again
gettimeofday(&tEnd, NULL);
// Finally, we print the difference between the start time and the finished time
printf("CPU Particles Update completed in %3.10f miliseconds \n", ((tEnd.tv_sec - tStart.tv_sec) * 1000000.0 + (tEnd.tv_usec - tStart.tv_usec)) / 1000.0);
*/
// We comment this as we are not comparing CPU and GPU
/*
if (compareArrays(d_particleArrayRes, particleArray))
{
printf("\n\nBoth methods ARE equivalent since the resultant arrays HAVE the same data!\n\n");
}
else {
printf("\n\nMethods ARE NOT equivalent because the resultant arrays HAVE NOT the same data!\n\n");
}*/
// Free resources of the 2 arrays that were dynamically allocated
//hipHostFree(d_particleArray);
    hipFree(particleArray);
printf("\nExcercise 3 completed! \n");
return 0;
}
| f2d9f29bbbb021c3c0fe73a109c473481361ca9e.cu | #pragma once
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <Windows.h>
/* After checking in many different forums, we have discovered that for some reason
Visual Studio Intellisense does not recognize the <<< >>> nomenclature, so we
have been forced to implement a macro to change that for KERNEL_ARGS2 and so
as it can be seen in the following lines.
We know that the right way is to directly use the <<< >>> but we had no option.
Hope you understand :)
*/
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#define TPB 1024
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
struct Particle
{
float3 position;
float3 velocity;
};
__global__ void kernelParticlesUpdate(int len, Particle* d_particleArray)
{
// We calculate the id with the general way although is just one block of threads
// and could be easily obtained with threadIdx.x
const int i = blockIdx.x * blockDim.x + threadIdx.x;
float dt = 1;
if (i < len)
{
// Velocity update
d_particleArray[i].velocity.x *= 0.5 * i;
d_particleArray[i].velocity.y *= 2 * i;
d_particleArray[i].velocity.z *= 0.75 * i;
// Position update
d_particleArray[i].position.x += d_particleArray[i].velocity.x * dt;
d_particleArray[i].position.y += d_particleArray[i].velocity.y * dt;
d_particleArray[i].position.z += d_particleArray[i].velocity.z * dt;
}
}
void cpuParticlesUpdate(Particle* particleArray)
{
int i;
for (i = 0; i < NUM_PARTICLES; i++)
{
float dt = 1;
// Velocity update
particleArray[i].velocity.x *= (float)0.5 * i;
particleArray[i].velocity.y *= (float)2 * i;
particleArray[i].velocity.z *= (float)0.75 * i;
// Position update
particleArray[i].position.x += particleArray[i].velocity.x * dt;
particleArray[i].position.y += particleArray[i].velocity.y * dt;
particleArray[i].position.z += particleArray[i].velocity.z * dt;
}
}
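// Minimal Windows replacement for POSIX gettimeofday(), built on GetSystemTimeAsFileTime()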
int gettimeofday(struct timeval* tv, struct timezone* tz)
{
static LONGLONG birthunixhnsec = 116444736000000000; /*in units of 100 ns */
FILETIME systemtime;
GetSystemTimeAsFileTime(&systemtime);
ULARGE_INTEGER utime;
utime.LowPart = systemtime.dwLowDateTime;
utime.HighPart = systemtime.dwHighDateTime;
ULARGE_INTEGER birthunix;
birthunix.LowPart = (DWORD)birthunixhnsec;
birthunix.HighPart = birthunixhnsec >> 32;
LONGLONG usecs;
usecs = (LONGLONG)((utime.QuadPart - birthunix.QuadPart) / 10);
tv->tv_sec = (long long)(usecs / 1000000);
tv->tv_usec = (long long)(usecs % 1000000);
return 0;
}
bool compareArrays(Particle* d_particleArrayRes, Particle* particleArray)
{
int i;
for (i = 0; i < NUM_PARTICLES; i++)
{
if (d_particleArrayRes[i].velocity.x != particleArray[i].velocity.x ||
d_particleArrayRes[i].velocity.y != particleArray[i].velocity.y ||
d_particleArrayRes[i].velocity.z != particleArray[i].velocity.z ||
d_particleArrayRes[i].position.x != particleArray[i].position.x ||
d_particleArrayRes[i].position.y != particleArray[i].position.y ||
d_particleArrayRes[i].position.z != particleArray[i].position.z)
return false;
}
return true;
}
int main()
{
printf("\nLet´s run exercise 3! \n\n");
printf("\nNUM_PARTICLES: %d ", NUM_PARTICLES);
printf("\nNUM_ITERATIONS: %d ", NUM_ITERATIONS);
printf("\nThreads per block: %d ", TPB);
printf("\nNumber of thread blocks: %d \n\n", (NUM_PARTICLES + TPB - 1) / TPB);
//Particle* d_particleArray;
Particle* particleArray;
	// We ask for CUDA unified (managed) memory allocation
	if (cudaMallocManaged(&particleArray, NUM_PARTICLES * sizeof(Particle)) != cudaSuccess)
{
// We check for errors after requesting CUDA unified memory allocation
printf("\n\nERROR 1! fail when allocating CUDA unified memory!\n\n");
return 1;
}
for (int i = 0; i < NUM_PARTICLES; i++)
{
particleArray[i].position = make_float3(rand(), rand(), rand());
particleArray[i].velocity = make_float3(rand(), rand(), rand());
}
// Let's measure the time
struct timeval tStart;
struct timeval tEnd;
printf("Computing GPU Particles Update... \n");
// We start the timer
gettimeofday(&tStart, NULL);
// Launch kernel to print hello worlds with Ids
for (int i = 0; i < NUM_ITERATIONS; i++)
{
// Copy the particles data from the CPU into the GPU pinned memory (NOT WITH MANAGED UNIFIED MEMORY)
//cudaMemcpy(d_particleArray, particleArray, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
// Then, process the particles data in the GPU
kernelParticlesUpdate KERNEL_ARGS2((NUM_PARTICLES + TPB - 1) / TPB, TPB)(NUM_PARTICLES, particleArray);
cudaDeviceSynchronize();
// Copy the particles data from the GPU into the CPU (NOT WITH MANAGED UNIFIED MEMORY)
//cudaMemcpy(particleArray, d_particleArray, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
}
// We wait for the GPU
cudaDeviceSynchronize();
// We stop the timer...
gettimeofday(&tEnd, NULL);
// And finally print the timer
printf("GPU Particles Update completed in %3.10f miliseconds \n", ((tEnd.tv_sec - tStart.tv_sec) * 1000000.0 + (tEnd.tv_usec - tStart.tv_usec)) / 1000.0);
//Particle* d_particleArrayRes = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//if (d_particleArrayRes == NULL) { printf("\n\nERROR 2! fail when allocating cuda dynamic memory!\n\n"); return 1; }
//cudaMemcpy(d_particleArrayRes, d_particleArray, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
// We comment this since we are not interested in CPU calculating it anymore
/*
// Now let's go with the CPU Saxpy
printf("\nComputing CPU Particles Update... \n");
// We restart the timer
gettimeofday(&tStart, NULL);
// The CPU Saxpy is computed
for (int i = 0; i < NUM_ITERATIONS; i++)
{
cpuParticlesUpdate(particleArray);
}
// And then we stop the timer again
gettimeofday(&tEnd, NULL);
// Finally, we print the difference between the start time and the finished time
printf("CPU Particles Update completed in %3.10f miliseconds \n", ((tEnd.tv_sec - tStart.tv_sec) * 1000000.0 + (tEnd.tv_usec - tStart.tv_usec)) / 1000.0);
*/
// We comment this as we are not comparing CPU and GPU
/*
if (compareArrays(d_particleArrayRes, particleArray))
{
printf("\n\nBoth methods ARE equivalent since the resultant arrays HAVE the same data!\n\n");
}
else {
printf("\n\nMethods ARE NOT equivalent because the resultant arrays HAVE NOT the same data!\n\n");
}*/
// Free resources of the 2 arrays that were dynamically allocated
//cudaFreeHost(d_particleArray);
free(particleArray);
printf("\nExcercise 3 completed! \n");
return 0;
}
|
13f9e41082381dbf8676cbc17d0e1b17681b26c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __FACE_DETECTION_MAIN
#define __FACE_DETECTION_MAIN
#include "../easypng/png.h"
#include "../util/png2arrays.h"
#include "kernels.hip"
void do_face_detection_cuda(float * r, float * g, float * b, int width, int height) {
int size = width * height;
hipError_t cuda_ret;
//allocate the device input arrays
float *r_dev, *r_gauss_dev;
float *g_dev, *g_gauss_dev;
float *b_dev, *b_gauss_dev;
hipMalloc((void**) &r_dev, size * sizeof(float));
hipMalloc((void**) &g_dev, size * sizeof(float));
hipMalloc((void**) &b_dev, size * sizeof(float));
hipMalloc((void**) &r_gauss_dev, size * sizeof(float));
hipMalloc((void**) &g_gauss_dev, size * sizeof(float));
hipMalloc((void**) &b_gauss_dev, size * sizeof(float));
//allocate device output arrays
float *grayscale_dev_1;
hipMalloc((void**) &grayscale_dev_1, size * sizeof(float));
float *grayscale_dev_2;
hipMalloc((void**) &grayscale_dev_2, size * sizeof(float));
float *grayscale_dev_1_out;
hipMalloc((void**) &grayscale_dev_1_out, size * sizeof(float));
float *grayscale_dev_2_out;
hipMalloc((void**) &grayscale_dev_2_out, size * sizeof(float));
float *grayscale_dev_3_out;
hipMalloc((void**) &grayscale_dev_3_out, size * sizeof(float));
float *grayscale_dev_4_out;
hipMalloc((void**) &grayscale_dev_4_out, size * sizeof(float));
//copy input to device
hipMemcpy(r_gauss_dev, r, size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(g_gauss_dev, g, size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_gauss_dev, b, size * sizeof(float), hipMemcpyHostToDevice);
dim3 dim_grid_gauss(width/12 + 1,height/12 + 1, 1);
dim3 dim_block_gauss(16, 16, 1);
hipLaunchKernelGGL(( gaussian_filter_kernel), dim3(dim_grid_gauss), dim3(dim_block_gauss), 0, 0, r_gauss_dev, r_dev, width, height);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess)
{
printf("%s\n", hipGetErrorString(cuda_ret));
}
hipLaunchKernelGGL(( gaussian_filter_kernel), dim3(dim_grid_gauss), dim3(dim_block_gauss), 0, 0, g_gauss_dev, g_dev, width, height);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess)
{
printf("%s\n", hipGetErrorString(cuda_ret));
}
hipLaunchKernelGGL(( gaussian_filter_kernel), dim3(dim_grid_gauss), dim3(dim_block_gauss), 0, 0, b_gauss_dev, b_dev, width, height);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess)
{
printf("%s\n", hipGetErrorString(cuda_ret));
}
hipFree(r_gauss_dev);
hipFree(g_gauss_dev);
hipFree(b_gauss_dev);
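	// 16x16 thread blocks; the grid is rounded up so image sizes that are not multiples of 16 are still fully covered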
dim3 dim_grid_gray(width / 16 + 1, height / 16 + 1, 1);
dim3 dim_block_gray(16, 16, 1);
hipLaunchKernelGGL(( skin_detection), dim3(dim_grid_gray), dim3(dim_block_gray), 0, 0, r_dev, g_dev, b_dev, grayscale_dev_1, grayscale_dev_2, width, height);
hipLaunchKernelGGL(( clean_up), dim3(dim_grid_gray), dim3(dim_block_gray), 0, 0, grayscale_dev_1, grayscale_dev_2, grayscale_dev_3_out, grayscale_dev_4_out, width, height);
hipLaunchKernelGGL(( quantization), dim3(dim_grid_gray), dim3(dim_block_gray), 0, 0, grayscale_dev_3_out, grayscale_dev_4_out, grayscale_dev_1_out, grayscale_dev_2_out, width, height);
hipFree(r_dev);
hipFree(g_dev);
hipFree(b_dev);
float * grayscale_host_skin = (float *)malloc(size * sizeof(float));
hipMemcpy(grayscale_host_skin, grayscale_dev_1, size * sizeof(float), hipMemcpyDeviceToHost);
float * grayscale_host_hair = (float *)malloc(size * sizeof(float));
hipMemcpy(grayscale_host_hair, grayscale_dev_2, size * sizeof(float), hipMemcpyDeviceToHost);
face_detection(grayscale_host_skin, grayscale_host_hair, r, g, b, width, height);
// hipMemcpy(r, grayscale_dev_1, size * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(g, grayscale_dev_1, size * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(b, grayscale_dev_1, size * sizeof(float), hipMemcpyDeviceToHost);
hipFree(grayscale_dev_1);
hipFree(grayscale_dev_2);
hipFree(grayscale_dev_1_out);
hipFree(grayscale_dev_2_out);
hipFree(grayscale_dev_3_out);
hipFree(grayscale_dev_4_out);
free(grayscale_host_skin);
free(grayscale_host_hair);
}
#endif //__CUDA_MAIN
| 13f9e41082381dbf8676cbc17d0e1b17681b26c4.cu | #ifndef __FACE_DETECTION_MAIN
#define __FACE_DETECTION_MAIN
#include "../easypng/png.h"
#include "../util/png2arrays.h"
#include "kernels.cu"
void do_face_detection_cuda(float * r, float * g, float * b, int width, int height) {
int size = width * height;
cudaError_t cuda_ret;
//allocate the device input arrays
float *r_dev, *r_gauss_dev;
float *g_dev, *g_gauss_dev;
float *b_dev, *b_gauss_dev;
cudaMalloc((void**) &r_dev, size * sizeof(float));
cudaMalloc((void**) &g_dev, size * sizeof(float));
cudaMalloc((void**) &b_dev, size * sizeof(float));
cudaMalloc((void**) &r_gauss_dev, size * sizeof(float));
cudaMalloc((void**) &g_gauss_dev, size * sizeof(float));
cudaMalloc((void**) &b_gauss_dev, size * sizeof(float));
//allocate device output arrays
float *grayscale_dev_1;
cudaMalloc((void**) &grayscale_dev_1, size * sizeof(float));
float *grayscale_dev_2;
cudaMalloc((void**) &grayscale_dev_2, size * sizeof(float));
float *grayscale_dev_1_out;
cudaMalloc((void**) &grayscale_dev_1_out, size * sizeof(float));
float *grayscale_dev_2_out;
cudaMalloc((void**) &grayscale_dev_2_out, size * sizeof(float));
float *grayscale_dev_3_out;
cudaMalloc((void**) &grayscale_dev_3_out, size * sizeof(float));
float *grayscale_dev_4_out;
cudaMalloc((void**) &grayscale_dev_4_out, size * sizeof(float));
//copy input to device
cudaMemcpy(r_gauss_dev, r, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(g_gauss_dev, g, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_gauss_dev, b, size * sizeof(float), cudaMemcpyHostToDevice);
dim3 dim_grid_gauss(width/12 + 1,height/12 + 1, 1);
dim3 dim_block_gauss(16, 16, 1);
gaussian_filter_kernel<<<dim_grid_gauss, dim_block_gauss>>>(r_gauss_dev, r_dev, width, height);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(cuda_ret));
}
gaussian_filter_kernel<<<dim_grid_gauss, dim_block_gauss>>>(g_gauss_dev, g_dev, width, height);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(cuda_ret));
}
gaussian_filter_kernel<<<dim_grid_gauss, dim_block_gauss>>>(b_gauss_dev, b_dev, width, height);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(cuda_ret));
}
cudaFree(r_gauss_dev);
cudaFree(g_gauss_dev);
cudaFree(b_gauss_dev);
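	// 16x16 thread blocks; the grid is rounded up so image sizes that are not multiples of 16 are still fully covered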
dim3 dim_grid_gray(width / 16 + 1, height / 16 + 1, 1);
dim3 dim_block_gray(16, 16, 1);
skin_detection<<<dim_grid_gray, dim_block_gray>>>(r_dev, g_dev, b_dev, grayscale_dev_1, grayscale_dev_2, width, height);
clean_up<<<dim_grid_gray, dim_block_gray>>>(grayscale_dev_1, grayscale_dev_2, grayscale_dev_3_out, grayscale_dev_4_out, width, height);
quantization<<<dim_grid_gray, dim_block_gray>>>(grayscale_dev_3_out, grayscale_dev_4_out, grayscale_dev_1_out, grayscale_dev_2_out, width, height);
cudaFree(r_dev);
cudaFree(g_dev);
cudaFree(b_dev);
float * grayscale_host_skin = (float *)malloc(size * sizeof(float));
cudaMemcpy(grayscale_host_skin, grayscale_dev_1, size * sizeof(float), cudaMemcpyDeviceToHost);
float * grayscale_host_hair = (float *)malloc(size * sizeof(float));
cudaMemcpy(grayscale_host_hair, grayscale_dev_2, size * sizeof(float), cudaMemcpyDeviceToHost);
face_detection(grayscale_host_skin, grayscale_host_hair, r, g, b, width, height);
// cudaMemcpy(r, grayscale_dev_1, size * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(g, grayscale_dev_1, size * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(b, grayscale_dev_1, size * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(grayscale_dev_1);
cudaFree(grayscale_dev_2);
cudaFree(grayscale_dev_1_out);
cudaFree(grayscale_dev_2_out);
cudaFree(grayscale_dev_3_out);
cudaFree(grayscale_dev_4_out);
free(grayscale_host_skin);
free(grayscale_host_hair);
}
#endif //__CUDA_MAIN
|
618be6f5e48a21bf66fe8e6da6aa629ef97c71da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const double *qold, double *q, double *res,
const double *adt, double *rms) {
double del, adti, rmsl;
rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
// CUDA kernel function
__global__ void op_cuda_update(
const double *__restrict arg0,
double *arg1,
double *arg2,
const double *__restrict arg3,
double *arg4,
int set_size ) {
double arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_double;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
double*arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg4.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0,
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
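    // fold the per-block partial sums written by op_reduction back into the single host-side reduction value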
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
| 618be6f5e48a21bf66fe8e6da6aa629ef97c71da.cu | //
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const double *qold, double *q, double *res,
const double *adt, double *rms) {
double del, adti, rmsl;
rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
// CUDA kernel function
__global__ void op_cuda_update(
const double *__restrict arg0,
double *arg1,
double *arg2,
const double *__restrict arg3,
double *arg4,
int set_size ) {
double arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_double;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
double*arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg4.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_update<<<nblocks,nthread,nshared>>>(
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
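    // fold the per-block partial sums written by op_reduction back into the single host-side reduction value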
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
|
99607953679a4f8475a6ddb08d2a572b7d63f1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "data_source.hpp"
#include <iostream>
#include <cstdlib>
#include <hip/hip_runtime_api.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
template <typename T>
struct LARGERT
{
__host__ __device__ __forceinline__
T operator()(const T& x, const T& y) const {
float4 result;
printf("Adress first: %p\n", (void*)&x);
printf("Adress second: %p\n", (void*)&y);
printf("comparing LargerT: %f, %f, %f with %f %f %f\n", x.x, x.y, x.z, y.x, y.y, y.z);
result.x = fmax(x.x, y.x);
result.y = fmax(x.y, y.y);
result.z = fmax(x.z, y.z);
return result;
}
};
template <typename T>
struct LESST
{
__host__ __device__ __forceinline__
T operator()(const T& x, const T& y) const {
float4 result;
printf("Adress first: %p\n", (void*)&x);
printf("Adress second: %p\n", (void*)&y);
printf("comparing LessT: %f, %f, %f with %f %f %f\n", x.x, x.y, x.z, y.x, y.y, y.z);
result.x = fmin(x.x, y.x);
result.y = fmin(x.y, y.y);
result.z = fmin(x.z, y.z);
return result;
}
};
void error(const char* error_string, const char* file, const int line, const char* func)
{
std::cout << "Error: " << error_string << "\t" << file << ":" << line << std::endl;
exit(EXIT_FAILURE);
}
static inline void ___cudaSafeCall(hipError_t err, const char* file, const int line, const char* func = "")
{
if (hipSuccess != err)
error(hipGetErrorString(err), file, line, func);
}
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
void FilterPoints(float4* baseAddress, size_t sizeBytes_)
{
/** \brief Device pointer. */
void* data_;
cudaSafeCall(hipMalloc(&data_, sizeBytes_));
cudaSafeCall(hipMemcpy(data_, baseAddress, sizeBytes_, hipMemcpyHostToDevice));
cudaSafeCall(hipDeviceSynchronize());
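	// FLT_MAX / -FLT_MAX act as the identity values for the component-wise min / max reductions below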
float4 max;
max.x = max.y = max.z = FLT_MAX;
max.w = 0;
float4 min;
min.x = min.y = min.z = -FLT_MAX;
min.w = 0;
thrust::device_ptr<float4> beg((float4*)data_);
thrust::device_ptr<float4> end = beg + sizeBytes_ / 16;
std::cout << "Before reduce" << std::endl;
float4 minp = thrust::reduce(beg, end, max, LESST<float4>{});
float4 maxp = thrust::reduce(beg, end, min, LARGERT<float4>{});
std::cout << "minp is: " << minp.x << "," << minp.y << "," << minp.z << std::endl;
std::cout << "maxp is: " << maxp.x << "," << maxp.y << "," << maxp.z << std::endl;
}
int main(void)
{
DataGenerator data;
data.data_size = 2;
data.tests_num = 10000;
data.cube_size = 1024.f;
data.max_radius = data.cube_size / 30.f;
data.shared_radius = data.cube_size / 30.f;
data.printParams();
//generate
data();
size_t sizeBytes_ = data.data_size * 16;
std::cout << "Before filter" << std::endl;
FilterPoints(&data.points[0], sizeBytes_);
return 0;
} | 99607953679a4f8475a6ddb08d2a572b7d63f1bb.cu | #include "data_source.hpp"
#include <iostream>
#include <cstdlib>
#include <cuda_runtime_api.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
template <typename T>
struct LARGERT
{
__host__ __device__ __forceinline__
T operator()(const T& x, const T& y) const {
float4 result;
printf("Adress first: %p\n", (void*)&x);
printf("Adress second: %p\n", (void*)&y);
printf("comparing LargerT: %f, %f, %f with %f %f %f\n", x.x, x.y, x.z, y.x, y.y, y.z);
result.x = fmax(x.x, y.x);
result.y = fmax(x.y, y.y);
result.z = fmax(x.z, y.z);
return result;
}
};
template <typename T>
struct LESST
{
__host__ __device__ __forceinline__
T operator()(const T& x, const T& y) const {
float4 result;
printf("Adress first: %p\n", (void*)&x);
printf("Adress second: %p\n", (void*)&y);
printf("comparing LessT: %f, %f, %f with %f %f %f\n", x.x, x.y, x.z, y.x, y.y, y.z);
result.x = fmin(x.x, y.x);
result.y = fmin(x.y, y.y);
result.z = fmin(x.z, y.z);
return result;
}
};
void error(const char* error_string, const char* file, const int line, const char* func)
{
std::cout << "Error: " << error_string << "\t" << file << ":" << line << std::endl;
exit(EXIT_FAILURE);
}
static inline void ___cudaSafeCall(cudaError_t err, const char* file, const int line, const char* func = "")
{
if (cudaSuccess != err)
error(cudaGetErrorString(err), file, line, func);
}
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
void FilterPoints(float4* baseAddress, size_t sizeBytes_)
{
/** \brief Device pointer. */
void* data_;
cudaSafeCall(cudaMalloc(&data_, sizeBytes_));
cudaSafeCall(cudaMemcpy(data_, baseAddress, sizeBytes_, cudaMemcpyHostToDevice));
cudaSafeCall(cudaDeviceSynchronize());
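	// FLT_MAX / -FLT_MAX act as the identity values for the component-wise min / max reductions below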
float4 max;
max.x = max.y = max.z = FLT_MAX;
max.w = 0;
float4 min;
min.x = min.y = min.z = -FLT_MAX;
min.w = 0;
thrust::device_ptr<float4> beg((float4*)data_);
thrust::device_ptr<float4> end = beg + sizeBytes_ / 16;
std::cout << "Before reduce" << std::endl;
float4 minp = thrust::reduce(beg, end, max, LESST<float4>{});
float4 maxp = thrust::reduce(beg, end, min, LARGERT<float4>{});
std::cout << "minp is: " << minp.x << "," << minp.y << "," << minp.z << std::endl;
std::cout << "maxp is: " << maxp.x << "," << maxp.y << "," << maxp.z << std::endl;
}
int main(void)
{
DataGenerator data;
data.data_size = 2;
data.tests_num = 10000;
data.cube_size = 1024.f;
data.max_radius = data.cube_size / 30.f;
data.shared_radius = data.cube_size / 30.f;
data.printParams();
//generate
data();
size_t sizeBytes_ = data.data_size * 16;
std::cout << "Before filter" << std::endl;
FilterPoints(&data.points[0], sizeBytes_);
return 0;
} |
29ab2247c4d4101bb2bb9d2f3b975b579f64ca0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_arithemetic_interface.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/kernel/util/host_arithemetic_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
template<int32_t NDIMS>
struct Int32Array {
int32_t val[NDIMS];
};
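// Converts a flat output (y) index into the flat input (x) index by peeling off output coordinates
// from the innermost dimension and weighting them with the permuted input strides.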
template<int32_t NDIMS>
__device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) {
int32_t x_idx = 0;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
const int32_t next_y_idx = y_idx / y_shape[i];
x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i];
y_idx = next_y_idx;
}
return x_idx;
}
template<int32_t NDIMS, typename T>
__global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides,
const int32_t elem_cnt, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) {
const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx);
#if __CUDA_ARCH__ >= 350
y[y_idx] = __ldg(x + x_idx);
#else
y[y_idx] = x[x_idx];
#endif
}
}
template<int32_t NDIMS, typename T>
void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>());
Int32Array<NDIMS> y_shape_struct;
FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); }
Int32Array<NDIMS> x_strides;
int32_t buff[NDIMS];
int32_t cur_stride = 1;
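  // buff[i] is the row-major stride of input axis i; permuting it gives, for each output axis, the stride of the input axis it reads from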
for (int32_t i = NDIMS - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= x_shape.At(i);
}
for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; }
hipLaunchKernelGGL(( TransposeGpu<NDIMS, T>)
, dim3(SMBlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
y_shape_struct, x_strides, elem_cnt, x, y);
}
template<typename T>
struct TransposeUtil final {
#define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T>
DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY,
MAKE_NDIM_CTRV_SEQ(DIM_SEQ));
};
} // namespace
#define TRANSPOSE_CHECK \
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \
CHECK_EQ(num_axis, y_shape.NumAxes()); \
CHECK_EQ(num_axis, x_shape.NumAxes())
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
TRANSPOSE_CHECK;
TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
TRANSPOSE_CHECK;
TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
TRANSPOSE_CHECK;
TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation,
elem_cnt, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
#undef TRANSPOSE_CHECK
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf(
DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) {
WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) {
ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob);
});
}
namespace {
template<typename T>
__global__ void MulByScalarGpu(const int64_t n, const T* x, const T y, T* z) {
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y; }
}
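// half specialization: __hmul is only available from compute capability 5.3 upwards, so older architectures fail via HALF_CHECK_FAILED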
template<>
__global__ void MulByScalarGpu<half>(const int64_t n, const half* x, const half y, half* z) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = __hmul(x[i], y); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void MulByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y_value; }
}
template<typename T>
__global__ void AddByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] + y_value; }
}
template<typename T>
__global__ void SubByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] - y_value; }
}
template<typename T>
__global__ void DivByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] / y_value; }
}
template<typename T>
__global__ void FillGpu(const int64_t n, const T value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; }
}
template<typename T>
__global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x,
const int64_t x_col_offset, const int64_t x_lda, T* y,
const int64_t y_col_offset, const int64_t y_lda) {
CUDA_1D_KERNEL_LOOP(index, row_num * col_num) {
const int64_t i = index / col_num;
const int64_t j = index % col_num;
y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j];
}
}
} // namespace
#define MUL_BY_SCALAR(T) \
void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \
const T y, T* z) { \
hipLaunchKernelGGL(( MulByScalarGpu<T>) \
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \
}
MUL_BY_SCALAR(float)
MUL_BY_SCALAR(double)
MUL_BY_SCALAR(int32_t)
MUL_BY_SCALAR(int64_t)
#undef MUL_BY_SCALAR
void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const float16* x,
const float16 y, float16* z) {
hipLaunchKernelGGL(( MulByScalarGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, reinterpret_cast<const half*>(x), float16_2half(y), reinterpret_cast<half*>(z));
}
#define MUL_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::MulByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
hipLaunchKernelGGL(( MulByScalarPtrGpu<T>) \
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \
}
MUL_BY_SCALAR_PTR(float)
MUL_BY_SCALAR_PTR(double)
MUL_BY_SCALAR_PTR(int8_t)
MUL_BY_SCALAR_PTR(int32_t)
MUL_BY_SCALAR_PTR(int64_t)
#undef MUL_BY_SCALAR_PTR
#define ADD_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::AddByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
hipLaunchKernelGGL(( AddByScalarPtrGpu<T>) \
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \
}
ADD_BY_SCALAR_PTR(float)
ADD_BY_SCALAR_PTR(double)
ADD_BY_SCALAR_PTR(int8_t)
ADD_BY_SCALAR_PTR(int32_t)
ADD_BY_SCALAR_PTR(int64_t)
#undef ADD_BY_SCALAR_PTR
#define SUB_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::SubByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
hipLaunchKernelGGL(( SubByScalarPtrGpu<T>) \
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \
}
SUB_BY_SCALAR_PTR(float)
SUB_BY_SCALAR_PTR(double)
SUB_BY_SCALAR_PTR(int8_t)
SUB_BY_SCALAR_PTR(int32_t)
SUB_BY_SCALAR_PTR(int64_t)
#undef SUB_BY_SCALAR_PTR
#define DIV_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::DivByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
hipLaunchKernelGGL(( DivByScalarPtrGpu<T>) \
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \
}
DIV_BY_SCALAR_PTR(float)
DIV_BY_SCALAR_PTR(double)
DIV_BY_SCALAR_PTR(int8_t)
DIV_BY_SCALAR_PTR(int32_t)
DIV_BY_SCALAR_PTR(int64_t)
#undef DIV_BY_SCALAR_PTR
#define FILL(T) \
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \
T* y) { \
hipLaunchKernelGGL(( FillGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), \
n, value, y); \
}
FILL(float)
FILL(double)
FILL(int8_t)
FILL(int32_t)
FILL(int64_t)
#undef FILL
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value,
float16* y) {
hipLaunchKernelGGL(( FillGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, float16_2half(value), reinterpret_cast<half*>(y));
}
#define COPY_COLS_REGION(T) \
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \
DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \
const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \
const int64_t y_lda) { \
hipLaunchKernelGGL(( CopyColsRegionGpu<T>), dim3(BlocksNum4ThreadsNum(row_num* col_num)), dim3(kCudaThreadsNumPerBlock), 0, \
ctx->cuda_stream(), row_num, col_num, x, x_col_offset, x_lda, y, \
y_col_offset, y_lda); \
}
COPY_COLS_REGION(float)
COPY_COLS_REGION(double)
COPY_COLS_REGION(int8_t)
COPY_COLS_REGION(int32_t)
COPY_COLS_REGION(int64_t)
#undef COPY_COLS_REGION
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num,
const int64_t col_num, const float16* x,
const int64_t x_col_offset,
const int64_t x_lda, float16* y,
const int64_t y_col_offset,
const int64_t y_lda) {
hipLaunchKernelGGL(( CopyColsRegionGpu<half>)
, dim3(BlocksNum4ThreadsNum(row_num * col_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda,
reinterpret_cast<half*>(y), y_col_offset, y_lda);
}
} // namespace oneflow
| 29ab2247c4d4101bb2bb9d2f3b975b579f64ca0f.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_arithemetic_interface.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/kernel/util/host_arithemetic_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
template<int32_t NDIMS>
struct Int32Array {
int32_t val[NDIMS];
};
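// Converts a flat output (y) index into the flat input (x) index by peeling off output coordinates
// from the innermost dimension and weighting them with the permuted input strides.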
template<int32_t NDIMS>
__device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) {
int32_t x_idx = 0;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
const int32_t next_y_idx = y_idx / y_shape[i];
x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i];
y_idx = next_y_idx;
}
return x_idx;
}
template<int32_t NDIMS, typename T>
__global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides,
const int32_t elem_cnt, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) {
const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx);
#if __CUDA_ARCH__ >= 350
y[y_idx] = __ldg(x + x_idx);
#else
y[y_idx] = x[x_idx];
#endif
}
}
template<int32_t NDIMS, typename T>
void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>());
Int32Array<NDIMS> y_shape_struct;
FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); }
Int32Array<NDIMS> x_strides;
int32_t buff[NDIMS];
int32_t cur_stride = 1;
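  // buff[i] is the row-major stride of input axis i; permuting it gives, for each output axis, the stride of the input axis it reads from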
for (int32_t i = NDIMS - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= x_shape.At(i);
}
for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; }
TransposeGpu<NDIMS, T>
<<<SMBlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
y_shape_struct, x_strides, elem_cnt, x, y);
}
template<typename T>
struct TransposeUtil final {
#define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T>
DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY,
MAKE_NDIM_CTRV_SEQ(DIM_SEQ));
};
} // namespace
#define TRANSPOSE_CHECK \
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \
CHECK_EQ(num_axis, y_shape.NumAxes()); \
CHECK_EQ(num_axis, x_shape.NumAxes())
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
TRANSPOSE_CHECK;
TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
TRANSPOSE_CHECK;
TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
TRANSPOSE_CHECK;
TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation,
elem_cnt, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
#undef TRANSPOSE_CHECK
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf(
DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) {
WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) {
ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob);
});
}
namespace {
template<typename T>
__global__ void MulByScalarGpu(const int64_t n, const T* x, const T y, T* z) {
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y; }
}
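// half specialization: __hmul is only available from compute capability 5.3 upwards, so older architectures fail via HALF_CHECK_FAILED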
template<>
__global__ void MulByScalarGpu<half>(const int64_t n, const half* x, const half y, half* z) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = __hmul(x[i], y); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void MulByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y_value; }
}
template<typename T>
__global__ void AddByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] + y_value; }
}
template<typename T>
__global__ void SubByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] - y_value; }
}
template<typename T>
__global__ void DivByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) {
const T y_value = y[0];
CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] / y_value; }
}
template<typename T>
__global__ void FillGpu(const int64_t n, const T value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; }
}
template<typename T>
__global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x,
const int64_t x_col_offset, const int64_t x_lda, T* y,
const int64_t y_col_offset, const int64_t y_lda) {
CUDA_1D_KERNEL_LOOP(index, row_num * col_num) {
const int64_t i = index / col_num;
const int64_t j = index % col_num;
y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j];
}
}
} // namespace
#define MUL_BY_SCALAR(T) \
void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \
const T y, T* z) { \
MulByScalarGpu<T> \
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \
}
MUL_BY_SCALAR(float)
MUL_BY_SCALAR(double)
MUL_BY_SCALAR(int32_t)
MUL_BY_SCALAR(int64_t)
#undef MUL_BY_SCALAR
void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const float16* x,
const float16 y, float16* z) {
MulByScalarGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, reinterpret_cast<const half*>(x), float16_2half(y), reinterpret_cast<half*>(z));
}
#define MUL_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::MulByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
MulByScalarPtrGpu<T> \
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \
}
MUL_BY_SCALAR_PTR(float)
MUL_BY_SCALAR_PTR(double)
MUL_BY_SCALAR_PTR(int8_t)
MUL_BY_SCALAR_PTR(int32_t)
MUL_BY_SCALAR_PTR(int64_t)
#undef MUL_BY_SCALAR_PTR
#define ADD_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::AddByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
AddByScalarPtrGpu<T> \
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \
}
ADD_BY_SCALAR_PTR(float)
ADD_BY_SCALAR_PTR(double)
ADD_BY_SCALAR_PTR(int8_t)
ADD_BY_SCALAR_PTR(int32_t)
ADD_BY_SCALAR_PTR(int64_t)
#undef ADD_BY_SCALAR_PTR
#define SUB_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::SubByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
SubByScalarPtrGpu<T> \
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \
}
SUB_BY_SCALAR_PTR(float)
SUB_BY_SCALAR_PTR(double)
SUB_BY_SCALAR_PTR(int8_t)
SUB_BY_SCALAR_PTR(int32_t)
SUB_BY_SCALAR_PTR(int64_t)
#undef SUB_BY_SCALAR_PTR
#define DIV_BY_SCALAR_PTR(T) \
void ArithemeticIf<DeviceType::kGPU>::DivByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
DivByScalarPtrGpu<T> \
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \
}
DIV_BY_SCALAR_PTR(float)
DIV_BY_SCALAR_PTR(double)
DIV_BY_SCALAR_PTR(int8_t)
DIV_BY_SCALAR_PTR(int32_t)
DIV_BY_SCALAR_PTR(int64_t)
#undef DIV_BY_SCALAR_PTR
#define FILL(T) \
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \
T* y) { \
FillGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( \
n, value, y); \
}
FILL(float)
FILL(double)
FILL(int8_t)
FILL(int32_t)
FILL(int64_t)
#undef FILL
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value,
float16* y) {
FillGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, float16_2half(value), reinterpret_cast<half*>(y));
}
#define COPY_COLS_REGION(T) \
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \
DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \
const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \
const int64_t y_lda) { \
CopyColsRegionGpu<T><<<BlocksNum4ThreadsNum(row_num* col_num), kCudaThreadsNumPerBlock, 0, \
ctx->cuda_stream()>>>(row_num, col_num, x, x_col_offset, x_lda, y, \
y_col_offset, y_lda); \
}
COPY_COLS_REGION(float)
COPY_COLS_REGION(double)
COPY_COLS_REGION(int8_t)
COPY_COLS_REGION(int32_t)
COPY_COLS_REGION(int64_t)
#undef COPY_COLS_REGION
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num,
const int64_t col_num, const float16* x,
const int64_t x_col_offset,
const int64_t x_lda, float16* y,
const int64_t y_col_offset,
const int64_t y_lda) {
CopyColsRegionGpu<half>
<<<BlocksNum4ThreadsNum(row_num * col_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda,
reinterpret_cast<half*>(y), y_col_offset, y_lda);
}
} // namespace oneflow
|
831e3a6281f191ca605835ea52b8e54bcabf5505.hip | // !!! This is a file automatically generated by hipify!!!
/* *******************************************************
* Created By Ethan Kreloff April 4th, 2014.
* *******************************************************
* Based off of code from CSCI 5239/4239 Advanced Computer
* Graphics at the University of Colorado, Boulder.
* *******************************************************
* Uses CUDA GPU computing to parallelize a brute force
* search.
* *******************************************************
* hw10.cu
* ******************************************************/
/*
* CUDA BruteSearch
*/
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <hip/hip_runtime.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
/*
* Return elapsed wall time since last call (seconds)
*/
static double t0=0;
double Elapsed(void)
{
#ifdef _WIN32
// Windows version of wall time
LARGE_INTEGER tv,freq;
QueryPerformanceCounter((LARGE_INTEGER*)&tv);
QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
double t = tv.QuadPart/(double)freq.QuadPart;
#else
// Unix/Linux/OSX version of wall time
struct timeval tv;
gettimeofday(&tv,NULL);
double t = tv.tv_sec+1e-6*tv.tv_usec;
#endif
double s = t-t0;
t0 = t;
return s;
}
/*
* Print message to stderr and exit
*/
void Fatal(const char* format , ...)
{
va_list args;
va_start(args,format);
vfprintf(stderr,format,args);
va_end(args);
exit(1);
}
/*
* Initialize matrix with random values
*/
void RandomInit(float x[],const unsigned int n)
{
for (unsigned int i=0;i<n*n;i++)
x[i] = rand() / (float)RAND_MAX;
}
/*
* Initialize fastest GPU device
*/
int InitGPU(int verbose)
{
// Get number of CUDA devices
int num;
if (hipGetDeviceCount(&num)) Fatal("Cannot get number of CUDA devices\n");
if (num<1) Fatal("No CUDA devices found\n");
// Get fastest device
hipDeviceProp_t prop;
int MaxDevice = -1;
int MaxGflops = -1;
for (int dev=0;dev<num;dev++)
{
if (hipGetDeviceProperties(&prop,dev)) Fatal("Error getting device %d properties\n",dev);
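      // rough speed score (multiprocessor count x clock rate), used only to rank the available devices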
int Gflops = prop.multiProcessorCount * prop.clockRate;
if (verbose) printf("CUDA Device %d: %s Gflops %f Processors %d Threads/Block %d\n",dev,prop.name,1e-6*Gflops,prop.multiProcessorCount,prop.maxThreadsPerBlock);
if(Gflops > MaxGflops)
{
MaxGflops = Gflops;
MaxDevice = dev;
}
}
// Print and set device
if (hipGetDeviceProperties(&prop,MaxDevice)) Fatal("Error getting device %d properties\n",MaxDevice);
printf("Fastest CUDA Device %d: %s\n",MaxDevice,prop.name);
hipSetDevice(MaxDevice);
// Return max thread count
return prop.maxThreadsPerBlock;
}
/*
* C = A * B -- host
*/
void AxBh(float C[], const float A[], const float B[], unsigned int n)
{
for (unsigned int i=0;i<n;i++)
for (unsigned int j=0;j<n;j++)
{
double sum=0;
for (unsigned int k=0;k<n;k++)
sum += (double)A[i*n+k] * (double)B[k*n+j];
C[i*n+j] = (float)sum;
}
}
/*
* Compute one element of A * B
*/
__global__ void AxB(float C[],const float A[],const float B[],const unsigned int n)
{
unsigned int j = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int i = blockIdx.y*blockDim.y+threadIdx.y;
float sum =0;
for (int k=0;k<n;k++)
sum += A[i*n+k] * B[k*n+j];
C[i*n+j] = sum;
}
/*
* C = A * B -- device
*/
void AxBd(float Ch[],const float Ah[],const float Bh[],const unsigned int Bw,const unsigned int Bn)
{
// Calculate matrix dimensions
int n = Bw*Bn;
int N = n*n*sizeof(float);
// Allocate device memory
float* Ad;
float* Bd;
float* Cd;
if (hipMalloc((void**)&Ad,N)) Fatal("Cannot allocate device memory Ad\n");
if (hipMalloc((void**)&Bd,N)) Fatal("Cannot allocate device memory Bd\n");
if (hipMalloc((void**)&Cd,N)) Fatal("Cannot allocate device memory Cd\n");
// Copy A and B from host to device
if (hipMemcpy(Ad,Ah,N,hipMemcpyHostToDevice)) Fatal("Cannot copy A from host to device\n");
if (hipMemcpy(Bd,Bh,N,hipMemcpyHostToDevice)) Fatal("Cannot copy B from host to device\n");
// Set size of block to Bw x Bw, and Bn x Bn blocks
dim3 threads(Bw,Bw);
dim3 grid(Bn,Bn);
// Execute the kernel
hipLaunchKernelGGL(( AxB), dim3(grid),dim3(threads), 0, 0, Cd,Ad,Bd,n);
if (hipGetLastError()) Fatal("AxB failed\n");
// Copy C from device to host
if (hipMemcpy(Ch,Cd,N,hipMemcpyDeviceToHost)) Fatal("Cannot copy C from device to host\n");
// Free device memory
hipFree(Ad);
hipFree(Bd);
hipFree(Cd);
}
/*
* main
*/
int main(int argc, char* argv[])
{
// Process options
int opt;
int verbose=0;
while ((opt=getopt(argc,argv,"v"))!=-1)
{
if (opt=='v')
verbose++;
else
Fatal("Usage: [-v] <block width> <number of blocks>\n");
}
argc -= optind;
argv += optind;
// Get width and number of blocks
if (argc!=2) Fatal("Usage: [-v] <block width> <number of blocks>\n");
int Bw = atoi(argv[0]);
if (Bw<1) Fatal("Block width out of range %d\n",Bw);
int Bn = atoi(argv[1]);
if (Bn<1) Fatal("Number of blocks out of range %d\n",Bn);
// Total width is block times number of blocks
int n = Bw*Bn;
int N = n*n*sizeof(float);
printf("Bw=%d Bn=%d n=%d\n",Bw,Bn,n);
// Initialize GPU
int Mw = InitGPU(verbose);
if (Mw<Bw*Bw) Fatal("Thread count %d exceeds threads per block of %d\n",Bw*Bw,Mw);
// Allocate host matrices A/B/C/R
float* Ah = (float*)malloc(N);
float* Bh = (float*)malloc(N);
float* Ch = (float*)malloc(N);
float* Rh = (float*)malloc(N);
if (!Ah || !Bh || !Ch || !Rh) Fatal("Cannot allocate host memory\n");
// Initialize A & B
srand(9999);
RandomInit(Ah,n);
RandomInit(Bh,n);
// Compute R = AB on host
Elapsed();
AxBh(Rh,Ah,Bh,n);
double Th = Elapsed();
// Compute C = AB on device
Elapsed();
AxBd(Ch,Ah,Bh,Bw,Bn);
double Td = Elapsed();
// Compute difference between R and C
double r2=0;
for (int i=0;i<n*n;i++)
r2 += fabs(Ch[i]-Rh[i]);
r2 /= n*n;
// Free host memory
free(Ah);
free(Bh);
free(Ch);
free(Rh);
// Print results
printf("Host Time = %6.3f s\n",Th);
printf("Device Time = %6.3f s\n",Td);
printf("Speedup = %.1f\n",Th/Td);
printf("Difference = %.2e\n",r2);
// Done
return 0;
}
| 831e3a6281f191ca605835ea52b8e54bcabf5505.cu | /* *******************************************************
* Created By Ethan Kreloff April 4th, 2014.
* *******************************************************
* Based off of code from CSCI 5239/4239 Advanced Computer
* Graphics at the University of Colorado, Boulder.
* *******************************************************
* Uses CUDA GPU computing to parallelize a brute force
* search.
* *******************************************************
* hw10.cu
* ******************************************************/
/*
* CUDA BruteSearch
*/
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <cuda.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
/*
* Return elapsed wall time since last call (seconds)
*/
static double t0=0;
double Elapsed(void)
{
#ifdef _WIN32
// Windows version of wall time
LARGE_INTEGER tv,freq;
QueryPerformanceCounter((LARGE_INTEGER*)&tv);
QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
double t = tv.QuadPart/(double)freq.QuadPart;
#else
// Unix/Linux/OSX version of wall time
struct timeval tv;
gettimeofday(&tv,NULL);
double t = tv.tv_sec+1e-6*tv.tv_usec;
#endif
double s = t-t0;
t0 = t;
return s;
}
/*
* Print message to stderr and exit
*/
void Fatal(const char* format , ...)
{
va_list args;
va_start(args,format);
vfprintf(stderr,format,args);
va_end(args);
exit(1);
}
/*
* Initialize matrix with random values
*/
void RandomInit(float x[],const unsigned int n)
{
for (unsigned int i=0;i<n*n;i++)
x[i] = rand() / (float)RAND_MAX;
}
/*
* Initialize fastest GPU device
*/
int InitGPU(int verbose)
{
// Get number of CUDA devices
int num;
if (cudaGetDeviceCount(&num)) Fatal("Cannot get number of CUDA devices\n");
if (num<1) Fatal("No CUDA devices found\n");
// Get fastest device
cudaDeviceProp prop;
int MaxDevice = -1;
int MaxGflops = -1;
for (int dev=0;dev<num;dev++)
{
if (cudaGetDeviceProperties(&prop,dev)) Fatal("Error getting device %d properties\n",dev);
int Gflops = prop.multiProcessorCount * prop.clockRate;
if (verbose) printf("CUDA Device %d: %s Gflops %f Processors %d Threads/Block %d\n",dev,prop.name,1e-6*Gflops,prop.multiProcessorCount,prop.maxThreadsPerBlock);
if(Gflops > MaxGflops)
{
MaxGflops = Gflops;
MaxDevice = dev;
}
}
// Print and set device
if (cudaGetDeviceProperties(&prop,MaxDevice)) Fatal("Error getting device %d properties\n",MaxDevice);
printf("Fastest CUDA Device %d: %s\n",MaxDevice,prop.name);
cudaSetDevice(MaxDevice);
// Return max thread count
return prop.maxThreadsPerBlock;
}
/*
* C = A * B -- host
*/
void AxBh(float C[], const float A[], const float B[], unsigned int n)
{
for (unsigned int i=0;i<n;i++)
for (unsigned int j=0;j<n;j++)
{
double sum=0;
for (unsigned int k=0;k<n;k++)
sum += (double)A[i*n+k] * (double)B[k*n+j];
C[i*n+j] = (float)sum;
}
}
/*
* Compute one element of A * B
*/
__global__ void AxB(float C[],const float A[],const float B[],const unsigned int n)
{
unsigned int j = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int i = blockIdx.y*blockDim.y+threadIdx.y;
float sum =0;
for (int k=0;k<n;k++)
sum += A[i*n+k] * B[k*n+j];
C[i*n+j] = sum;
}
/*
* C = A * B -- device
*/
void AxBd(float Ch[],const float Ah[],const float Bh[],const unsigned int Bw,const unsigned int Bn)
{
// Calculate matrix dimensions
int n = Bw*Bn;
int N = n*n*sizeof(float);
// Allocate device memory
float* Ad;
float* Bd;
float* Cd;
if (cudaMalloc((void**)&Ad,N)) Fatal("Cannot allocate device memory Ad\n");
if (cudaMalloc((void**)&Bd,N)) Fatal("Cannot allocate device memory Bd\n");
if (cudaMalloc((void**)&Cd,N)) Fatal("Cannot allocate device memory Cd\n");
// Copy A and B from host to device
if (cudaMemcpy(Ad,Ah,N,cudaMemcpyHostToDevice)) Fatal("Cannot copy A from host to device\n");
if (cudaMemcpy(Bd,Bh,N,cudaMemcpyHostToDevice)) Fatal("Cannot copy B from host to device\n");
// Set size of block to Bw x Bw, and Bn x Bn blocks
dim3 threads(Bw,Bw);
dim3 grid(Bn,Bn);
// Execute the kernel
AxB<<<grid,threads>>>(Cd,Ad,Bd,n);
if (cudaGetLastError()) Fatal("AxB failed\n");
// Copy C from device to host
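// (cudaMemcpy on the default stream also waits for the kernel above to finish before copying.)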
if (cudaMemcpy(Ch,Cd,N,cudaMemcpyDeviceToHost)) Fatal("Cannot copy C from device to host\n");
// Free device memory
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
}
/*
* main
*/
int main(int argc, char* argv[])
{
// Process options
int opt;
int verbose=0;
while ((opt=getopt(argc,argv,"v"))!=-1)
{
if (opt=='v')
verbose++;
else
Fatal("Usage: [-v] <block width> <number of blocks>\n");
}
argc -= optind;
argv += optind;
// Get width and number of blocks
if (argc!=2) Fatal("Usage: [-v] <block width> <number of blocks>\n");
int Bw = atoi(argv[0]);
if (Bw<1) Fatal("Block width out of range %d\n",Bw);
int Bn = atoi(argv[1]);
if (Bn<1) Fatal("Number of blocks out of range %d\n",Bn);
// Total width is block times number of blocks
int n = Bw*Bn;
int N = n*n*sizeof(float);
printf("Bw=%d Bn=%d n=%d\n",Bw,Bn,n);
// Initialize GPU
int Mw = InitGPU(verbose);
if (Mw<Bw*Bw) Fatal("Thread count %d exceeds threads per block of %d\n",Bw*Bw,Mw);
// Allocate host matrices A/B/C/R
float* Ah = (float*)malloc(N);
float* Bh = (float*)malloc(N);
float* Ch = (float*)malloc(N);
float* Rh = (float*)malloc(N);
if (!Ah || !Bh || !Ch || !Rh) Fatal("Cannot allocate host memory\n");
// Initialize A & B
srand(9999);
RandomInit(Ah,n);
RandomInit(Bh,n);
// Compute R = AB on host
Elapsed();
AxBh(Rh,Ah,Bh,n);
double Th = Elapsed();
// Compute C = AB on device
Elapsed();
AxBd(Ch,Ah,Bh,Bw,Bn);
double Td = Elapsed();
// Compute difference between R and C
double r2=0;
for (int i=0;i<n*n;i++)
r2 += fabs(Ch[i]-Rh[i]);
r2 /= n*n;
// Free host memory
free(Ah);
free(Bh);
free(Ch);
free(Rh);
// Print results
printf("Host Time = %6.3f s\n",Th);
printf("Device Time = %6.3f s\n",Td);
printf("Speedup = %.1f\n",Th/Td);
printf("Difference = %.2e\n",r2);
// Done
return 0;
}
|
54f52f6a2ed20dac53d7bc17653b401d6151d108.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
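// Typical use (as in launch_kernel below): timeStartMeasure(); ...work...; timeReportMeasure(program_result, kernel);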
/* ----- END Macros ----- */
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
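// Note: environment_struct is empty in this program; the generated blocks receive _env_ but never read any fields from it.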
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_125 = ((indices.field_1 % 4));
(_temp_var_125 == 0 ? indices.field_0 : (_temp_var_125 == 1 ? indices.field_1 : (_temp_var_125 == 2 ? indices.field_2 : (_temp_var_125 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
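// kernel_149: initial map over a 20 x 500 x 500 x 2 grid (10,000,000 elements); each thread decodes its 4D index from _tid_ and stores the result of _block_k_2_.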
__global__ void kernel_149(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_126 = ((({ int _temp_var_127 = ((({ int _temp_var_128 = ((values[2] % 4));
(_temp_var_128 == 0 ? indices.field_0 : (_temp_var_128 == 1 ? indices.field_1 : (_temp_var_128 == 2 ? indices.field_2 : (_temp_var_128 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_127 == 0 ? indices.field_0 : (_temp_var_127 == 1 ? indices.field_1 : (_temp_var_127 == 2 ? indices.field_2 : (_temp_var_127 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_126 == 0 ? indices.field_0 : (_temp_var_126 == 1 ? indices.field_1 : (_temp_var_126 == 2 ? indices.field_2 : (_temp_var_126 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
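// kernel_147: 4-point stencil over the previous result. Each thread reads neighbours at dim_0 offsets -1/0/+1 plus one at (dim_0 - 1, dim_1 - 1),
// writing 37 whenever any neighbour falls outside the 20 x 500 x 500 x 2 bounds. The kernels below repeat this pattern, each consuming its predecessor's output.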
__global__ void kernel_147(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_150)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_151;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_151 = _block_k_3_(_env_, _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_151 = 37;
}
_result_[_tid_] = temp_stencil_151;
}
}
// TODO: There should be a better way to check if _block_k_5_ is already defined
#ifndef _block_k_5__func
#define _block_k_5__func
__device__ int _block_k_5_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_129 = ((({ int _temp_var_130 = ((({ int _temp_var_131 = ((values[2] % 4));
(_temp_var_131 == 0 ? indices.field_0 : (_temp_var_131 == 1 ? indices.field_1 : (_temp_var_131 == 2 ? indices.field_2 : (_temp_var_131 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_130 == 0 ? indices.field_0 : (_temp_var_130 == 1 ? indices.field_1 : (_temp_var_130 == 2 ? indices.field_2 : (_temp_var_130 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_129 == 0 ? indices.field_0 : (_temp_var_129 == 1 ? indices.field_1 : (_temp_var_129 == 2 ? indices.field_2 : (_temp_var_129 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_145(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_148)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_152;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_152 = _block_k_5_(_env_, _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_152 = 37;
}
_result_[_tid_] = temp_stencil_152;
}
}
// TODO: There should be a better way to check if _block_k_7_ is already defined
#ifndef _block_k_7__func
#define _block_k_7__func
__device__ int _block_k_7_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_132 = ((({ int _temp_var_133 = ((({ int _temp_var_134 = ((values[2] % 4));
(_temp_var_134 == 0 ? indices.field_0 : (_temp_var_134 == 1 ? indices.field_1 : (_temp_var_134 == 2 ? indices.field_2 : (_temp_var_134 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_133 == 0 ? indices.field_0 : (_temp_var_133 == 1 ? indices.field_1 : (_temp_var_133 == 2 ? indices.field_2 : (_temp_var_133 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_132 == 0 ? indices.field_0 : (_temp_var_132 == 1 ? indices.field_1 : (_temp_var_132 == 2 ? indices.field_2 : (_temp_var_132 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_143(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_146)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_153;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_153 = _block_k_7_(_env_, _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_153 = 37;
}
_result_[_tid_] = temp_stencil_153;
}
}
// TODO: There should be a better way to check if _block_k_9_ is already defined
#ifndef _block_k_9__func
#define _block_k_9__func
__device__ int _block_k_9_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_135 = ((({ int _temp_var_136 = ((({ int _temp_var_137 = ((values[2] % 4));
(_temp_var_137 == 0 ? indices.field_0 : (_temp_var_137 == 1 ? indices.field_1 : (_temp_var_137 == 2 ? indices.field_2 : (_temp_var_137 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_136 == 0 ? indices.field_0 : (_temp_var_136 == 1 ? indices.field_1 : (_temp_var_136 == 2 ? indices.field_2 : (_temp_var_136 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_135 == 0 ? indices.field_0 : (_temp_var_135 == 1 ? indices.field_1 : (_temp_var_135 == 2 ? indices.field_2 : (_temp_var_135 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_141(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_144)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_154;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_154 = _block_k_9_(_env_, _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_154 = 37;
}
_result_[_tid_] = temp_stencil_154;
}
}
// TODO: There should be a better way to check if _block_k_11_ is already defined
#ifndef _block_k_11__func
#define _block_k_11__func
__device__ int _block_k_11_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_138 = ((({ int _temp_var_139 = ((({ int _temp_var_140 = ((values[2] % 4));
(_temp_var_140 == 0 ? indices.field_0 : (_temp_var_140 == 1 ? indices.field_1 : (_temp_var_140 == 2 ? indices.field_2 : (_temp_var_140 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_139 == 0 ? indices.field_0 : (_temp_var_139 == 1 ? indices.field_1 : (_temp_var_139 == 2 ? indices.field_2 : (_temp_var_139 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_138 == 0 ? indices.field_0 : (_temp_var_138 == 1 ? indices.field_1 : (_temp_var_138 == 2 ? indices.field_2 : (_temp_var_138 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_139(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_142)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_155;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_155 = _block_k_11_(_env_, _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_155 = 37;
}
_result_[_tid_] = temp_stencil_155;
}
}
// TODO: There should be a better way to check if _block_k_13_ is already defined
#ifndef _block_k_13__func
#define _block_k_13__func
__device__ int _block_k_13_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_141 = ((({ int _temp_var_142 = ((({ int _temp_var_143 = ((values[2] % 4));
(_temp_var_143 == 0 ? indices.field_0 : (_temp_var_143 == 1 ? indices.field_1 : (_temp_var_143 == 2 ? indices.field_2 : (_temp_var_143 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_142 == 0 ? indices.field_0 : (_temp_var_142 == 1 ? indices.field_1 : (_temp_var_142 == 2 ? indices.field_2 : (_temp_var_142 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_141 == 0 ? indices.field_0 : (_temp_var_141 == 1 ? indices.field_1 : (_temp_var_141 == 2 ? indices.field_2 : (_temp_var_141 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_137(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_140)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_156;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_156 = _block_k_13_(_env_, _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_156 = 37;
}
_result_[_tid_] = temp_stencil_156;
}
}
// TODO: There should be a better way to check if _block_k_15_ is already defined
#ifndef _block_k_15__func
#define _block_k_15__func
__device__ int _block_k_15_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_144 = ((({ int _temp_var_145 = ((({ int _temp_var_146 = ((values[2] % 4));
(_temp_var_146 == 0 ? indices.field_0 : (_temp_var_146 == 1 ? indices.field_1 : (_temp_var_146 == 2 ? indices.field_2 : (_temp_var_146 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_145 == 0 ? indices.field_0 : (_temp_var_145 == 1 ? indices.field_1 : (_temp_var_145 == 2 ? indices.field_2 : (_temp_var_145 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_144 == 0 ? indices.field_0 : (_temp_var_144 == 1 ? indices.field_1 : (_temp_var_144 == 2 ? indices.field_2 : (_temp_var_144 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_135(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_138)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_157;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_157 = _block_k_15_(_env_, _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_157 = 37;
}
_result_[_tid_] = temp_stencil_157;
}
}
// TODO: There should be a better way to check if _block_k_17_ is already defined
#ifndef _block_k_17__func
#define _block_k_17__func
__device__ int _block_k_17_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_147 = ((({ int _temp_var_148 = ((({ int _temp_var_149 = ((values[2] % 4));
(_temp_var_149 == 0 ? indices.field_0 : (_temp_var_149 == 1 ? indices.field_1 : (_temp_var_149 == 2 ? indices.field_2 : (_temp_var_149 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_148 == 0 ? indices.field_0 : (_temp_var_148 == 1 ? indices.field_1 : (_temp_var_148 == 2 ? indices.field_2 : (_temp_var_148 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_147 == 0 ? indices.field_0 : (_temp_var_147 == 1 ? indices.field_1 : (_temp_var_147 == 2 ? indices.field_2 : (_temp_var_147 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_133(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_136)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_158;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_158 = _block_k_17_(_env_, _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_158 = 37;
}
_result_[_tid_] = temp_stencil_158;
}
}
// TODO: There should be a better way to check if _block_k_19_ is already defined
#ifndef _block_k_19__func
#define _block_k_19__func
__device__ int _block_k_19_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_150 = ((({ int _temp_var_151 = ((({ int _temp_var_152 = ((values[2] % 4));
(_temp_var_152 == 0 ? indices.field_0 : (_temp_var_152 == 1 ? indices.field_1 : (_temp_var_152 == 2 ? indices.field_2 : (_temp_var_152 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_151 == 0 ? indices.field_0 : (_temp_var_151 == 1 ? indices.field_1 : (_temp_var_151 == 2 ? indices.field_2 : (_temp_var_151 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_150 == 0 ? indices.field_0 : (_temp_var_150 == 1 ? indices.field_1 : (_temp_var_150 == 2 ? indices.field_2 : (_temp_var_150 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_131(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_134)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_159;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_159 = _block_k_19_(_env_, _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_159 = 37;
}
_result_[_tid_] = temp_stencil_159;
}
}
// TODO: There should be a better way to check if _block_k_21_ is already defined
#ifndef _block_k_21__func
#define _block_k_21__func
__device__ int _block_k_21_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_153 = ((({ int _temp_var_154 = ((({ int _temp_var_155 = ((values[2] % 4));
(_temp_var_155 == 0 ? indices.field_0 : (_temp_var_155 == 1 ? indices.field_1 : (_temp_var_155 == 2 ? indices.field_2 : (_temp_var_155 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_154 == 0 ? indices.field_0 : (_temp_var_154 == 1 ? indices.field_1 : (_temp_var_154 == 2 ? indices.field_2 : (_temp_var_154 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_153 == 0 ? indices.field_0 : (_temp_var_153 == 1 ? indices.field_1 : (_temp_var_153 == 2 ? indices.field_2 : (_temp_var_153 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_129(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_132)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_160;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_160 = _block_k_21_(_env_, _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_160 = 37;
}
_result_[_tid_] = temp_stencil_160;
}
}
#undef checkErrorReturn
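// checkErrorReturn: records the HIP status in the result struct and, on failure, prints the error, resets the device and returns from launch_kernel early.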
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return result_var;\
}
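// Exported host entry point: selects the device, copies the (empty) environment over, runs the kernel chain, copies the final array back to the host and frees all device buffers.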
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, hipFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, hipMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(dev_env, host_env, sizeof(environment_t), hipMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Launch all kernels */
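// Launch configuration: 39063 blocks x 256 threads = 10,000,128 threads covering the 10,000,000 elements (the in-kernel _tid_ < _num_threads_ guard drops the excess).
// kernel_149 produces the initial array; each later kernel applies the stencil to the previous kernel's output buffer.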
timeStartMeasure();
int * _kernel_result_150;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_150, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_150);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_149), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_150);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_148;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_148, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_148);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_147), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_148, _kernel_result_150);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_146;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_146, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_146);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_145), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_146, _kernel_result_148);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_144;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_144, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_144);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_143), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_144, _kernel_result_146);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_142;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_142, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_142);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_141), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_142, _kernel_result_144);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_140;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_140, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_140);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_139), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_140, _kernel_result_142);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_138;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_138, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_138);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_137), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_138, _kernel_result_140);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_136;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_136, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_136);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_135), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_136, _kernel_result_138);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_134;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_134, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_134);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_133), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_134, _kernel_result_136);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_132;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_132, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_132);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_131), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_132, _kernel_result_134);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_130;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_130, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_130);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_129), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_130, _kernel_result_132);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
/* Copy over result to the host */
program_result->result = ({
variable_size_array_t device_array = variable_size_array_t((void *) _kernel_result_130, 10000000);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, hipMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_150));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_148));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_146));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_144));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_142));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_140));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_138));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_136));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_134));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_132));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_130));
timeReportMeasure(program_result, free_memory);
delete program_result->device_allocations;
return program_result;
}
| 54f52f6a2ed20dac53d7bc17653b401d6151d108.cu | #include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_125 = ((indices.field_1 % 4));
(_temp_var_125 == 0 ? indices.field_0 : (_temp_var_125 == 1 ? indices.field_1 : (_temp_var_125 == 2 ? indices.field_2 : (_temp_var_125 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_149(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better way to check if _block_k_3_ is already defined
#ifndef _block_k_3__func
#define _block_k_3__func
__device__ int _block_k_3_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_126 = ((({ int _temp_var_127 = ((({ int _temp_var_128 = ((values[2] % 4));
(_temp_var_128 == 0 ? indices.field_0 : (_temp_var_128 == 1 ? indices.field_1 : (_temp_var_128 == 2 ? indices.field_2 : (_temp_var_128 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_127 == 0 ? indices.field_0 : (_temp_var_127 == 1 ? indices.field_1 : (_temp_var_127 == 2 ? indices.field_2 : (_temp_var_127 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_126 == 0 ? indices.field_0 : (_temp_var_126 == 1 ? indices.field_1 : (_temp_var_126 == 2 ? indices.field_2 : (_temp_var_126 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_147(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_150)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_151;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_151 = _block_k_3_(_env_, _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_150[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_151 = 37;
}
_result_[_tid_] = temp_stencil_151;
}
}
// TODO: There should be a better way to check if _block_k_5_ is already defined
#ifndef _block_k_5__func
#define _block_k_5__func
__device__ int _block_k_5_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_129 = ((({ int _temp_var_130 = ((({ int _temp_var_131 = ((values[2] % 4));
(_temp_var_131 == 0 ? indices.field_0 : (_temp_var_131 == 1 ? indices.field_1 : (_temp_var_131 == 2 ? indices.field_2 : (_temp_var_131 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_130 == 0 ? indices.field_0 : (_temp_var_130 == 1 ? indices.field_1 : (_temp_var_130 == 2 ? indices.field_2 : (_temp_var_130 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_129 == 0 ? indices.field_0 : (_temp_var_129 == 1 ? indices.field_1 : (_temp_var_129 == 2 ? indices.field_2 : (_temp_var_129 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_145(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_148)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_152;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_152 = _block_k_5_(_env_, _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_148[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_152 = 37;
}
_result_[_tid_] = temp_stencil_152;
}
}
// TODO: There should be a better way to check if _block_k_7_ is already defined
#ifndef _block_k_7__func
#define _block_k_7__func
__device__ int _block_k_7_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_132 = ((({ int _temp_var_133 = ((({ int _temp_var_134 = ((values[2] % 4));
(_temp_var_134 == 0 ? indices.field_0 : (_temp_var_134 == 1 ? indices.field_1 : (_temp_var_134 == 2 ? indices.field_2 : (_temp_var_134 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_133 == 0 ? indices.field_0 : (_temp_var_133 == 1 ? indices.field_1 : (_temp_var_133 == 2 ? indices.field_2 : (_temp_var_133 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_132 == 0 ? indices.field_0 : (_temp_var_132 == 1 ? indices.field_1 : (_temp_var_132 == 2 ? indices.field_2 : (_temp_var_132 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_143(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_146)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_153;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_153 = _block_k_7_(_env_, _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_146[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_153 = 37;
}
_result_[_tid_] = temp_stencil_153;
}
}
// TODO: There should be a better way to check if _block_k_9_ is already defined
#ifndef _block_k_9__func
#define _block_k_9__func
__device__ int _block_k_9_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_135 = ((({ int _temp_var_136 = ((({ int _temp_var_137 = ((values[2] % 4));
(_temp_var_137 == 0 ? indices.field_0 : (_temp_var_137 == 1 ? indices.field_1 : (_temp_var_137 == 2 ? indices.field_2 : (_temp_var_137 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_136 == 0 ? indices.field_0 : (_temp_var_136 == 1 ? indices.field_1 : (_temp_var_136 == 2 ? indices.field_2 : (_temp_var_136 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_135 == 0 ? indices.field_0 : (_temp_var_135 == 1 ? indices.field_1 : (_temp_var_135 == 2 ? indices.field_2 : (_temp_var_135 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_141(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_144)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_154;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_154 = _block_k_9_(_env_, _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_144[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_154 = 37;
}
_result_[_tid_] = temp_stencil_154;
}
}
// TODO: There should be a better way to check if _block_k_11_ is already defined
#ifndef _block_k_11__func
#define _block_k_11__func
__device__ int _block_k_11_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_138 = ((({ int _temp_var_139 = ((({ int _temp_var_140 = ((values[2] % 4));
(_temp_var_140 == 0 ? indices.field_0 : (_temp_var_140 == 1 ? indices.field_1 : (_temp_var_140 == 2 ? indices.field_2 : (_temp_var_140 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_139 == 0 ? indices.field_0 : (_temp_var_139 == 1 ? indices.field_1 : (_temp_var_139 == 2 ? indices.field_2 : (_temp_var_139 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_138 == 0 ? indices.field_0 : (_temp_var_138 == 1 ? indices.field_1 : (_temp_var_138 == 2 ? indices.field_2 : (_temp_var_138 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_139(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_142)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_155;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_155 = _block_k_11_(_env_, _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_142[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_155 = 37;
}
_result_[_tid_] = temp_stencil_155;
}
}
// TODO: There should be a better way to check if _block_k_13_ is already defined
#ifndef _block_k_13__func
#define _block_k_13__func
__device__ int _block_k_13_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_141 = ((({ int _temp_var_142 = ((({ int _temp_var_143 = ((values[2] % 4));
(_temp_var_143 == 0 ? indices.field_0 : (_temp_var_143 == 1 ? indices.field_1 : (_temp_var_143 == 2 ? indices.field_2 : (_temp_var_143 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_142 == 0 ? indices.field_0 : (_temp_var_142 == 1 ? indices.field_1 : (_temp_var_142 == 2 ? indices.field_2 : (_temp_var_142 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_141 == 0 ? indices.field_0 : (_temp_var_141 == 1 ? indices.field_1 : (_temp_var_141 == 2 ? indices.field_2 : (_temp_var_141 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_137(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_140)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_156;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_156 = _block_k_13_(_env_, _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_140[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_156 = 37;
}
_result_[_tid_] = temp_stencil_156;
}
}
// TODO: There should be a better way to check if _block_k_15_ is already defined
#ifndef _block_k_15__func
#define _block_k_15__func
__device__ int _block_k_15_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_144 = ((({ int _temp_var_145 = ((({ int _temp_var_146 = ((values[2] % 4));
(_temp_var_146 == 0 ? indices.field_0 : (_temp_var_146 == 1 ? indices.field_1 : (_temp_var_146 == 2 ? indices.field_2 : (_temp_var_146 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_145 == 0 ? indices.field_0 : (_temp_var_145 == 1 ? indices.field_1 : (_temp_var_145 == 2 ? indices.field_2 : (_temp_var_145 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_144 == 0 ? indices.field_0 : (_temp_var_144 == 1 ? indices.field_1 : (_temp_var_144 == 2 ? indices.field_2 : (_temp_var_144 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_135(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_138)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_157;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_157 = _block_k_15_(_env_, _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_138[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_157 = 37;
}
_result_[_tid_] = temp_stencil_157;
}
}
// TODO: There should be a better way to check if _block_k_17_ is already defined
#ifndef _block_k_17__func
#define _block_k_17__func
__device__ int _block_k_17_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_147 = ((({ int _temp_var_148 = ((({ int _temp_var_149 = ((values[2] % 4));
(_temp_var_149 == 0 ? indices.field_0 : (_temp_var_149 == 1 ? indices.field_1 : (_temp_var_149 == 2 ? indices.field_2 : (_temp_var_149 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_148 == 0 ? indices.field_0 : (_temp_var_148 == 1 ? indices.field_1 : (_temp_var_148 == 2 ? indices.field_2 : (_temp_var_148 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_147 == 0 ? indices.field_0 : (_temp_var_147 == 1 ? indices.field_1 : (_temp_var_147 == 2 ? indices.field_2 : (_temp_var_147 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_133(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_136)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_158;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_158 = _block_k_17_(_env_, _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_136[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_158 = 37;
}
_result_[_tid_] = temp_stencil_158;
}
}
// TODO: There should be a better way to check if _block_k_19_ is already defined
#ifndef _block_k_19__func
#define _block_k_19__func
__device__ int _block_k_19_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_150 = ((({ int _temp_var_151 = ((({ int _temp_var_152 = ((values[2] % 4));
(_temp_var_152 == 0 ? indices.field_0 : (_temp_var_152 == 1 ? indices.field_1 : (_temp_var_152 == 2 ? indices.field_2 : (_temp_var_152 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_151 == 0 ? indices.field_0 : (_temp_var_151 == 1 ? indices.field_1 : (_temp_var_151 == 2 ? indices.field_2 : (_temp_var_151 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_150 == 0 ? indices.field_0 : (_temp_var_150 == 1 ? indices.field_1 : (_temp_var_150 == 2 ? indices.field_2 : (_temp_var_150 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_131(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_134)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_159;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_159 = _block_k_19_(_env_, _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_134[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_159 = 37;
}
_result_[_tid_] = temp_stencil_159;
}
}
// TODO: There should be a better way to check if _block_k_21_ is already defined
#ifndef _block_k_21__func
#define _block_k_21__func
__device__ int _block_k_21_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_153 = ((({ int _temp_var_154 = ((({ int _temp_var_155 = ((values[2] % 4));
(_temp_var_155 == 0 ? indices.field_0 : (_temp_var_155 == 1 ? indices.field_1 : (_temp_var_155 == 2 ? indices.field_2 : (_temp_var_155 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_154 == 0 ? indices.field_0 : (_temp_var_154 == 1 ? indices.field_1 : (_temp_var_154 == 2 ? indices.field_2 : (_temp_var_154 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_153 == 0 ? indices.field_0 : (_temp_var_153 == 1 ? indices.field_1 : (_temp_var_153 == 2 ? indices.field_2 : (_temp_var_153 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_129(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_132)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_160;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_160 = _block_k_21_(_env_, _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_132[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_160 = 37;
}
_result_[_tid_] = temp_stencil_160;
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return result_var;\
}
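// Note: the single '=' in the macro's condition is intentional: the CUDA status returned by
// expr is stored in last_error, and any non-zero status takes the error path above.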
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, cudaFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Launch all kernels */
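// kernel_149 fills the initial 10,000,000-element buffer from the index pattern; each of the
// ten stencil kernels that follow reads the previous pass's buffer and writes a fresh one
// (39063 blocks x 256 threads >= 10,000,000 threads, one per element).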
timeStartMeasure();
int * _kernel_result_150;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_150, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_150);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_149<<<39063, 256>>>(dev_env, 10000000, _kernel_result_150);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_148;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_148, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_148);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_147<<<39063, 256>>>(dev_env, 10000000, _kernel_result_148, _kernel_result_150);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_146;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_146, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_146);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_145<<<39063, 256>>>(dev_env, 10000000, _kernel_result_146, _kernel_result_148);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_144;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_144, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_144);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_143<<<39063, 256>>>(dev_env, 10000000, _kernel_result_144, _kernel_result_146);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_142;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_142, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_142);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_141<<<39063, 256>>>(dev_env, 10000000, _kernel_result_142, _kernel_result_144);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_140;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_140, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_140);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_139<<<39063, 256>>>(dev_env, 10000000, _kernel_result_140, _kernel_result_142);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_138;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_138, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_138);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_137<<<39063, 256>>>(dev_env, 10000000, _kernel_result_138, _kernel_result_140);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_136;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_136, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_136);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_135<<<39063, 256>>>(dev_env, 10000000, _kernel_result_136, _kernel_result_138);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_134;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_134, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_134);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_133<<<39063, 256>>>(dev_env, 10000000, _kernel_result_134, _kernel_result_136);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_132;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_132, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_132);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_131<<<39063, 256>>>(dev_env, 10000000, _kernel_result_132, _kernel_result_134);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_130;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_130, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_130);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_129<<<39063, 256>>>(dev_env, 10000000, _kernel_result_130, _kernel_result_132);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
/* Copy over result to the host */
program_result->result = ({
variable_size_array_t device_array = variable_size_array_t((void *) _kernel_result_130, 10000000);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_150));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_148));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_146));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_144));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_142));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_140));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_138));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_136));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_134));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_132));
timeReportMeasure(program_result, free_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_130));
timeReportMeasure(program_result, free_memory);
delete program_result->device_allocations;
return program_result;
}
|
411e9fabe198e54fb18cf7d1c85a72bb36542d1b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <math_constants.h>
#include <hip/hip_runtime.h>
/* #define N 20000 */
/* #define GRID_D1 20 */
/* #define GRID_D2 2 */
/* #define BLOCK_D1 512 */
/* #define BLOCK_D2 1 */
/* #define BLOCK_D3 1 */
extern "C"
{
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int maxtries,
int rng_a, //RNG seed constant
int rng_b, //RNG seed constant
int rng_c) //RNG seed constant
{
// Notes/Hints from class
// i.e. threadIdx.x .y .z map these to a single index
//
// Check whether idx < n
//
// Usual block/thread indexing: collapse the 2D grid and 3D block into one linear index
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Copied this from hello_world
if (idx < n){
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx);
} else {
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d [### this thread would not be used for n=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx, n);
// Threads past the end of the data do no sampling
return;
}
// Initialize RNG (seeded per thread from the three RNG seed constants)
hiprandState_t rng;
hiprand_init(rng_a*idx+rng_b,rng_c,0,&rng);
// Sample the truncated normal
// mu for this index is mu[idx]
// sigma for this index is sigma[idx]
// a for this index is lo[idx]
// b for this index is hi[idx]
// X_i ~ Truncated-Normal(mu_i,sigma_i;[a_i,b_i])
// Sample N(mu, sigma^2):
vals[idx] = mu[idx] + sigma[idx]*hiprand_normal(&rng);
// To get a random uniform, use hiprand_uniform(&rng)
// TODO: reject and resample (up to maxtries draws) until the value lands in [lo[idx], hi[idx]]
return;
}
} // END extern "C"
| 411e9fabe198e54fb18cf7d1c85a72bb36542d1b.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
#include <cuda_runtime.h>
/* #define N 20000 */
/* #define GRID_D1 20 */
/* #define GRID_D2 2 */
/* #define BLOCK_D1 512 */
/* #define BLOCK_D2 1 */
/* #define BLOCK_D3 1 */
extern "C"
{
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int maxtries,
int rng_a, //RNG seed constant
int rng_b, //RNG seed constant
int rng_c) //RNG seed constant
{
// Notes/Hints from class
// i.e. threadIdx.x .y .z map these to a single index
//
// Check whether idx < n
//
// Usual block/thread indexing: collapse the 2D grid and 3D block into one linear index
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Copied this from hello_world
if (idx < n){
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx);
} else {
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d [### this thread would not be used for n=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx, n);
// Threads past the end of the data do no sampling
return;
}
// Initialize RNG (seeded per thread from the three RNG seed constants)
curandState rng;
curand_init(rng_a*idx+rng_b,rng_c,0,&rng);
// Sample the truncated normal
// mu for this index is mu[idx]
// sigma for this index is sigma[idx]
// a for this index is lo[idx]
// b for this index is hi[idx]
// X_i ~ Truncated-Normal(mu_i,sigma_i;[a_i,b_i])
// Sample N(mu, sigma^2):
vals[idx] = mu[idx] + sigma[idx]*curand_normal(&rng);
// To get a random uniform, use curand_uniform(&rng)
// TODO: reject and resample (up to maxtries draws) until the value lands in [lo[idx], hi[idx]]
return;
}
} // END extern "C"
|
bfb07396897444014fa6599e7354e344453ab0e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/stack_kernel.h"
namespace phi {
template <typename T, typename IntType>
__global__ void StackCUDAKernel(T** input_ptrs,
int split_size,
int rows,
int cols,
T* __restrict__ output) {
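// Grid-stride loop over output columns: column grid_x belongs to input tensor grid_x / split_size
// at its local column grid_x % split_size; the y grid dimension strides over the rows.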
IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) {
IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y;
IntType split = grid_x / split_size;
const T* input_ptr = input_ptrs[split];
IntType col_offset = grid_x % split_size;
#pragma unroll
for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) {
output[grid_y * cols + grid_x] =
input_ptr[grid_y * split_size + col_offset];
}
}
}
template <typename T, typename Context>
void StackKernel(const Context& dev_ctx,
const std::vector<const DenseTensor*>& x,
int axis,
DenseTensor* out) {
if (axis < 0) axis += (x[0]->dims().size() + 1);
int n = static_cast<int>(x.size());
T* y_data = dev_ctx.template Alloc<T>(out);
std::vector<const T*> x_datas(n);
for (int i = 0; i < n; i++) {
x_datas[i] = x[i]->data<T>();
}
auto tmp_x_data = paddle::memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_x_data->ptr(),
phi::CPUPlace(),
reinterpret_cast<void*>(x_datas.data()),
x_datas.size() * sizeof(T*),
dev_ctx.stream());
// Split x dim from axis to matrix
int x_row = 1, x_col = 1;
for (int i = 0; i < axis; ++i) {
x_row *= x[0]->dims()[i];
}
x_col = x[0]->numel() / x_row;
int out_col = x_col * n;
auto config =
phi::backends::gpu::GetGpuLaunchConfig2D(dev_ctx, out_col, x_row);
if (out->numel() < std::numeric_limits<int32_t>::max()) {
hipLaunchKernelGGL(( StackCUDAKernel<T, int32_t>)
, dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()),
x_col,
x_row,
out_col,
y_data);
} else {
hipLaunchKernelGGL(( StackCUDAKernel<T, int64_t>)
, dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()),
x_col,
x_row,
out_col,
y_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(stack,
GPU,
ALL_LAYOUT,
phi::StackKernel,
float,
double,
int64_t,
int,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| bfb07396897444014fa6599e7354e344453ab0e0.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/stack_kernel.h"
namespace phi {
template <typename T, typename IntType>
__global__ void StackCUDAKernel(T** input_ptrs,
int split_size,
int rows,
int cols,
T* __restrict__ output) {
IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) {
IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y;
IntType split = grid_x / split_size;
const T* input_ptr = input_ptrs[split];
IntType col_offset = grid_x % split_size;
#pragma unroll
for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) {
output[grid_y * cols + grid_x] =
input_ptr[grid_y * split_size + col_offset];
}
}
}
template <typename T, typename Context>
void StackKernel(const Context& dev_ctx,
const std::vector<const DenseTensor*>& x,
int axis,
DenseTensor* out) {
if (axis < 0) axis += (x[0]->dims().size() + 1);
int n = static_cast<int>(x.size());
T* y_data = dev_ctx.template Alloc<T>(out);
std::vector<const T*> x_datas(n);
for (int i = 0; i < n; i++) {
x_datas[i] = x[i]->data<T>();
}
auto tmp_x_data = paddle::memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_x_data->ptr(),
phi::CPUPlace(),
reinterpret_cast<void*>(x_datas.data()),
x_datas.size() * sizeof(T*),
dev_ctx.stream());
// Split x dim from axis to matrix
int x_row = 1, x_col = 1;
for (int i = 0; i < axis; ++i) {
x_row *= x[0]->dims()[i];
}
x_col = x[0]->numel() / x_row;
int out_col = x_col * n;
auto config =
phi::backends::gpu::GetGpuLaunchConfig2D(dev_ctx, out_col, x_row);
if (out->numel() < std::numeric_limits<int32_t>::max()) {
StackCUDAKernel<T, int32_t>
<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(reinterpret_cast<T**>(tmp_x_data->ptr()),
x_col,
x_row,
out_col,
y_data);
} else {
StackCUDAKernel<T, int64_t>
<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(reinterpret_cast<T**>(tmp_x_data->ptr()),
x_col,
x_row,
out_col,
y_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(stack,
GPU,
ALL_LAYOUT,
phi::StackKernel,
float,
double,
int64_t,
int,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
ebc232238523654a7ae929aa79aaae95252ac5db.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return ::max(
::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
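// Each corner value is weighted by the area of the sub-rectangle opposite to it, so w1..w4 sum to 1.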
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
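// Each loop iteration handles one output element (n, c, ph, pw); the bin value is the average of
// roi_bin_grid_h x roi_bin_grid_w bilinearly interpolated samples taken inside the bin.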
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
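// Classic compare-and-swap loop for a double-precision atomicAdd: reinterpret the bits as an
// integer, add in floating point, and retry until no other thread has modified the address.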
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlign_Forward_CUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = at::zeros({proposals, channels, pooled_height, pooled_width}, input.options());
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_Forward_CUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output;
}
at::Tensor ROIAlign_Backward_CUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = at::zeros({b_size, channels, height, width}, rois.options());
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlign_Backward_CUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return grad_in;
}
| ebc232238523654a7ae929aa79aaae95252ac5db.cu | #include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return std::max(
std::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
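// The specialization below emulates a double-precision atomicAdd with an
// atomicCAS loop; native atomicAdd(double*, double) is only available on
// devices of compute capability 6.0 and newer, so this fallback keeps the
// backward pass correct on older GPUs.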
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlign_Forward_CUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = at::zeros({proposals, channels, pooled_height, pooled_width}, input.options());
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_Forward_CUDA", ([&] {
RoIAlignForwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output;
}
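// Illustrative call (sketch, with example argument values): for a contiguous
// NCHW feature map `feat` and a contiguous (K, 5) `rois` tensor whose rows are
// [batch_index, x1, y1, x2, y2] in input-image coordinates,
//   auto pooled = ROIAlign_Forward_CUDA(feat, rois, 7, 7, 0.0625, 2);
// returns a (K, C, 7, 7) tensor; the 7x7 output size, 1/16 spatial scale and
// sampling ratio of 2 are placeholders, not requirements.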
at::Tensor ROIAlign_Backward_CUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = at::zeros({b_size, channels, height, width}, rois.options());
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlign_Backward_CUDA", ([&] {
RoIAlignBackwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return grad_in;
}
|
745903e1b8262cad07b6f583e297d2424097dec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgecsrmv.cu normal z -> d, Fri Jan 30 19:00:28 2015
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
dgecsrmv_kernel(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
dgecsrmv_kernel_shift(
int num_rows,
int num_cols,
double alpha,
double lambda,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
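/*
    Illustrative call (sketch; assumes dval/drowptr/dcolind already describe an
    m x n CSR matrix on the device and dx, dy are device vectors of length n
    and m respectively):

        magma_dgecsrmv( MagmaNoTrans, m, n, 2.0, dval, drowptr, dcolind,
                        dx, 0.0, dy, queue );   // dy = 2 * A * x
*/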
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
lambda double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
double lambda,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
| 745903e1b8262cad07b6f583e297d2424097dec2.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgecsrmv.cu normal z -> d, Fri Jan 30 19:00:28 2015
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
dgecsrmv_kernel(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
dgecsrmv_kernel_shift(
int num_rows,
int num_cols,
double alpha,
double lambda,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
dgecsrmv_kernel<<< grid, threads, 0, queue >>>
(m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
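/*
    Illustrative call (sketch; assumes dval/drowptr/dcolind already describe an
    m x n CSR matrix on the device and dx, dy are device vectors of length n
    and m respectively):

        magma_dgecsrmv( MagmaNoTrans, m, n, 2.0, dval, drowptr, dcolind,
                        dx, 0.0, dy, queue );   // dy = 2 * A * x
*/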
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
lambda double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
double lambda,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
dgecsrmv_kernel_shift<<< grid, threads, 0, queue >>>
(m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
|
5c8280cf0d6e759bf8115cc74c2ce781412cd9c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "shared.h"
#include "efficient.h"
#include "device_launch_parameters.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
//#define blockSize 128
int* dev_idata_shared;
int* padded_idata_shared;
namespace StreamCompaction {
namespace Shared {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
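        // upSweep / downSweep implement one level (depth d) of the work-efficient
        // (Blelloch) exclusive scan directly in global memory: up-sweep folds each
        // stride-2^(d+1) partner pair into a partial sum; down-sweep, once the last
        // element has been zeroed, pushes the running prefix back down the tree.
        // A host-side loop launches one kernel per level d (as in the Efficient scan
        // and the commented-out scan_device further below).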
__global__ void upSweep(int n, int d, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((index) % stride == 0) {
A[index + stride - 1] += A[index + other_index - 1];
}
}
__global__ void downSweep(int n, int d, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (index % right_index == 0) {
int temp = A[index + left_index - 1];
A[index + left_index - 1] = A[index + right_index - 1];
A[index + right_index - 1] += temp;
}
}
__global__ void scan_array(int n, int* A, int* B, int* intermediate) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int BLOCKSIZE = blockDim.x;
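            //The per-block scan below stages the tile in a fixed 512-entry shared buffer,
            //so it assumes blockDim.x <= 512; a larger block size would overrun s[].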
__shared__ int s[512];
int tid = threadIdx.x;
//Copy Global Memory to Shared
s[tid] = A[threadIdx.x + (blockIdx.x * BLOCKSIZE)];
int iterations = ilog2ceil(BLOCKSIZE);
//Up Sweep
for (int d = 0; d < ilog2(BLOCKSIZE); d++) {
__syncthreads();
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((tid) % stride == 0) {
s[tid + stride - 1] += s[tid + other_index - 1];
}
}
//__syncthreads();
if (threadIdx.x == BLOCKSIZE - 1) {
//Add last element of block (upsweep) to intermediate array
intermediate[blockIdx.x] = s[BLOCKSIZE - 1];
s[BLOCKSIZE - 1] = 0;
}
//__syncthreads();
//Down Sweep
for (int d = iterations - 1; d >= 0; d--) {
__syncthreads();
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (tid % right_index == 0) {
int temp = s[tid + left_index - 1];
s[tid + left_index - 1] = s[tid + right_index - 1];
s[tid + right_index - 1] += temp;
}
}
//Copy Result Back to Global Memory
B[threadIdx.x + (blockIdx.x * BLOCKSIZE)] = s[threadIdx.x];
}
__global__ void scan_array_old(int n, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int iterations = ilog2ceil(n);
//Up Sweep
for (int d = 0; d < ilog2(n); d++) {
__syncthreads();
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((index) % stride == 0) {
A[index + stride - 1] += A[index + other_index - 1];
}
}
__syncthreads();
//Down Sweep
A[n - 1] = 0;
for (int d = iterations - 1; d >= 0; d--) {
__syncthreads();
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (index % right_index == 0) {
int temp = A[index + left_index - 1];
A[index + left_index - 1] = A[index + right_index - 1];
A[index + right_index - 1] += temp;
}
}
}
__global__ void merge(int n, int* A, int* intermediate) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if(blockIdx.x > 0)
A[index] = A[index] + intermediate[blockIdx.x];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
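        // Strategy: (1) scan_array gives every block an exclusive Blelloch scan of its
        // tile in shared memory and writes the tile total into dev_intermediate;
        // (2) the block totals are scanned (without shared memory) by
        // StreamCompaction::Efficient::scan_device; (3) merge adds each block's
        // scanned offset back onto its tile, yielding the exclusive scan of the
        // whole padded array.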
void scan(int n, int *odata, const int *idata, int blockSize) {
int* dev_odata;
int* dev_intermediate;
int* dev_intermediate_scan;
int padded_size = 1 << (ilog2ceil(n));
int numberOfBlocks = (padded_size + blockSize - 1) / blockSize;
hipMalloc((void**)&padded_idata_shared, padded_size * sizeof(int));
checkCUDAErrorWithLine("hipMalloc padded_idata_shared failed!");
hipMalloc((void**)&dev_odata, padded_size * sizeof(int));
checkCUDAErrorWithLine("hipMalloc padded_idata_shared failed!");
hipMalloc((void**)&dev_intermediate, numberOfBlocks * sizeof(int));
checkCUDAErrorWithLine("hipMalloc padded_idata_shared failed!");
hipMalloc((void**)&dev_intermediate_scan, numberOfBlocks * sizeof(int));
checkCUDAErrorWithLine("hipMalloc padded_idata_shared failed!");
hipMemset(padded_idata_shared, 0, padded_size * sizeof(int));
hipMemcpy(padded_idata_shared, idata, sizeof(int) * n, hipMemcpyHostToDevice);
bool caught = false;
try {
timer().startGpuTimer();
}
catch (const std::exception& e) {
caught = true;
}
dim3 fullBlocksPerGrid(numberOfBlocks);
scan_array << <fullBlocksPerGrid, blockSize >> > (padded_size, padded_idata_shared, dev_odata, dev_intermediate);
int* inter = (int*)malloc(numberOfBlocks * sizeof(int));
/*printf("Number: %d", numberOfBlocks);
printf("\nINTERMEDIATE\n");
hipMemcpy(inter, dev_intermediate, sizeof(int) * numberOfBlocks, hipMemcpyDeviceToHost);
printArray(numberOfBlocks, inter, true);*/
//Scan the intermediate array (without shared memory in GPU)
StreamCompaction::Efficient::scan_device(numberOfBlocks, dev_intermediate_scan, dev_intermediate, blockSize);
int* inter2 = (int*)malloc(numberOfBlocks * sizeof(int));
/*printf("INTERMEDIATE SCAN\n");
hipMemcpy(inter2, dev_intermediate_scan, sizeof(int) * numberOfBlocks, hipMemcpyDeviceToHost);
printArray(numberOfBlocks, inter2, true);*/
//Add the elements of dev_intermediate to dev_odata
merge << <fullBlocksPerGrid, blockSize >> > (padded_size, dev_odata, dev_intermediate_scan);
//scan_array_old << <fullBlocksPerGrid, blockSize >> > (padded_size, padded_idata_shared);
if (!caught) {
timer().endGpuTimer();
}
hipMemcpy(odata, dev_odata, sizeof(int) * padded_size, hipMemcpyDeviceToHost);
/*hipFree(padded_idata_shared);
hipFree(dev_odata);
hipFree(dev_intermediate);
hipFree(dev_intermediate_scan);*/
}
//void scan_device(int n, int *odata, const int *idata, int blockSize) {
// int padded_size = 1 << (ilog2ceil(n));
// hipMalloc((void**)&padded_idata_shared, padded_size * sizeof(int));
// checkCUDAErrorWithLine("hipMalloc padded_idata_shared failed!");
// hipMemset(padded_idata_shared, 0, padded_size * sizeof(int));
// hipMemcpy(padded_idata_shared, idata, sizeof(int) * n, hipMemcpyDeviceToDevice);
// bool caught = false;
// try {
// timer().startGpuTimer();
// }
// catch (const std::exception& e) {
// caught = true;
// }
// int iterations = ilog2(padded_size);
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// bool optimized = false;
// //Up-Sweep
// if (optimized) {
// int number_of_threads = padded_size;
// for (int d = 0; d < iterations; d++) {
// number_of_threads /= 2;
// dim3 fullBlocksPerGridUpSweep((number_of_threads + blockSize - 1) / blockSize);
// upSweepOptimized << <fullBlocksPerGridUpSweep, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// else {
// for (int d = 0; d < iterations; d++) {
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// upSweep << <fullBlocksPerGrid, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// //Down-Sweep
// hipMemset(padded_idata_shared + (padded_size - 1), 0, sizeof(int));
// if (optimized) {
// int number_of_threads = 1;
// for (int d = iterations - 1; d >= 0; d--) {
// dim3 fullBlocksPerGridDownSweep((number_of_threads + blockSize - 1) / blockSize);
// downSweepOptimized << <fullBlocksPerGridDownSweep, blockSize >> > (padded_size, d, padded_idata_shared);
// number_of_threads *= 2;
// }
// }
// else {
// for (int d = iterations - 1; d >= 0; d--) {
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// downSweep << <fullBlocksPerGrid, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// if (!caught) {
// timer().endGpuTimer();
// }
// hipMemcpy(odata, padded_idata_shared, sizeof(int) * n, hipMemcpyDeviceToDevice);
//}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata, int blockSize) {
hipMalloc((void**)&dev_idata_shared, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata_shared failed!");
hipMemcpy(dev_idata_shared, idata, sizeof(int) * n, hipMemcpyHostToDevice);
int* dev_bools;
hipMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_bools failed!");
int* bools;
bools = (int*)malloc(n * sizeof(int));
int* indices;
indices = (int*)malloc(n * sizeof(int));
int *dev_indices;
hipMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_bools failed!");
timer().startGpuTimer();
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_bools, dev_idata_shared);
hipMemcpy(bools, dev_bools, sizeof(int) * n, hipMemcpyDeviceToHost);
scan(n, indices, bools, blockSize);
int output_length = indices[n - 1] + bools[n - 1];
hipMemcpy(dev_indices, indices, sizeof(int) * n, hipMemcpyHostToDevice);
int *dev_odata;
hipMalloc((void**)&dev_odata, output_length * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata_shared, dev_bools, dev_indices);
hipMemcpy(odata, dev_odata, sizeof(int) * output_length, hipMemcpyDeviceToHost);
timer().endGpuTimer();
return output_length;
}
}
}
| 5c8280cf0d6e759bf8115cc74c2ce781412cd9c0.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "shared.h"
#include "efficient.h"
#include "device_launch_parameters.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
//#define blockSize 128
int* dev_idata_shared;
int* padded_idata_shared;
namespace StreamCompaction {
namespace Shared {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
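        // upSweep / downSweep implement one level (depth d) of the work-efficient
        // (Blelloch) exclusive scan directly in global memory: up-sweep folds each
        // stride-2^(d+1) partner pair into a partial sum; down-sweep, once the last
        // element has been zeroed, pushes the running prefix back down the tree.
        // A host-side loop launches one kernel per level d (as in the Efficient scan
        // and the commented-out scan_device further below).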
__global__ void upSweep(int n, int d, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((index) % stride == 0) {
A[index + stride - 1] += A[index + other_index - 1];
}
}
__global__ void downSweep(int n, int d, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (index % right_index == 0) {
int temp = A[index + left_index - 1];
A[index + left_index - 1] = A[index + right_index - 1];
A[index + right_index - 1] += temp;
}
}
__global__ void scan_array(int n, int* A, int* B, int* intermediate) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int BLOCKSIZE = blockDim.x;
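            //The per-block scan below stages the tile in a fixed 512-entry shared buffer,
            //so it assumes blockDim.x <= 512; a larger block size would overrun s[].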
__shared__ int s[512];
int tid = threadIdx.x;
//Copy Global Memory to Shared
s[tid] = A[threadIdx.x + (blockIdx.x * BLOCKSIZE)];
int iterations = ilog2ceil(BLOCKSIZE);
//Up Sweep
for (int d = 0; d < ilog2(BLOCKSIZE); d++) {
__syncthreads();
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((tid) % stride == 0) {
s[tid + stride - 1] += s[tid + other_index - 1];
}
}
//__syncthreads();
if (threadIdx.x == BLOCKSIZE - 1) {
//Add last element of block (upsweep) to intermediate array
intermediate[blockIdx.x] = s[BLOCKSIZE - 1];
s[BLOCKSIZE - 1] = 0;
}
//__syncthreads();
//Down Sweep
for (int d = iterations - 1; d >= 0; d--) {
__syncthreads();
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (tid % right_index == 0) {
int temp = s[tid + left_index - 1];
s[tid + left_index - 1] = s[tid + right_index - 1];
s[tid + right_index - 1] += temp;
}
}
//Copy Result Back to Global Memory
B[threadIdx.x + (blockIdx.x * BLOCKSIZE)] = s[threadIdx.x];
}
__global__ void scan_array_old(int n, int* A) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int iterations = ilog2ceil(n);
//Up Sweep
for (int d = 0; d < ilog2(n); d++) {
__syncthreads();
int stride = 1 << (d + 1);
int other_index = 1 << d;
if ((index) % stride == 0) {
A[index + stride - 1] += A[index + other_index - 1];
}
}
__syncthreads();
//Down Sweep
A[n - 1] = 0;
for (int d = iterations - 1; d >= 0; d--) {
__syncthreads();
int left_index = 1 << (d);
int right_index = 1 << (d + 1);
if (index % right_index == 0) {
int temp = A[index + left_index - 1];
A[index + left_index - 1] = A[index + right_index - 1];
A[index + right_index - 1] += temp;
}
}
}
__global__ void merge(int n, int* A, int* intermediate) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if(blockIdx.x > 0)
A[index] = A[index] + intermediate[blockIdx.x];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
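        // Strategy: (1) scan_array gives every block an exclusive Blelloch scan of its
        // tile in shared memory and writes the tile total into dev_intermediate;
        // (2) the block totals are scanned (without shared memory) by
        // StreamCompaction::Efficient::scan_device; (3) merge adds each block's
        // scanned offset back onto its tile, yielding the exclusive scan of the
        // whole padded array.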
void scan(int n, int *odata, const int *idata, int blockSize) {
int* dev_odata;
int* dev_intermediate;
int* dev_intermediate_scan;
int padded_size = 1 << (ilog2ceil(n));
int numberOfBlocks = (padded_size + blockSize - 1) / blockSize;
cudaMalloc((void**)&padded_idata_shared, padded_size * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc padded_idata_shared failed!");
cudaMalloc((void**)&dev_odata, padded_size * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc padded_idata_shared failed!");
cudaMalloc((void**)&dev_intermediate, numberOfBlocks * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc padded_idata_shared failed!");
cudaMalloc((void**)&dev_intermediate_scan, numberOfBlocks * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc padded_idata_shared failed!");
cudaMemset(padded_idata_shared, 0, padded_size * sizeof(int));
cudaMemcpy(padded_idata_shared, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
bool caught = false;
try {
timer().startGpuTimer();
}
catch (const std::exception& e) {
caught = true;
}
dim3 fullBlocksPerGrid(numberOfBlocks);
scan_array << <fullBlocksPerGrid, blockSize >> > (padded_size, padded_idata_shared, dev_odata, dev_intermediate);
int* inter = (int*)malloc(numberOfBlocks * sizeof(int));
/*printf("Number: %d", numberOfBlocks);
printf("\nINTERMEDIATE\n");
cudaMemcpy(inter, dev_intermediate, sizeof(int) * numberOfBlocks, cudaMemcpyDeviceToHost);
printArray(numberOfBlocks, inter, true);*/
//Scan the intermediate array (without shared memory in GPU)
StreamCompaction::Efficient::scan_device(numberOfBlocks, dev_intermediate_scan, dev_intermediate, blockSize);
int* inter2 = (int*)malloc(numberOfBlocks * sizeof(int));
/*printf("INTERMEDIATE SCAN\n");
cudaMemcpy(inter2, dev_intermediate_scan, sizeof(int) * numberOfBlocks, cudaMemcpyDeviceToHost);
printArray(numberOfBlocks, inter2, true);*/
//Add the elements of dev_intermediate to dev_odata
merge << <fullBlocksPerGrid, blockSize >> > (padded_size, dev_odata, dev_intermediate_scan);
//scan_array_old << <fullBlocksPerGrid, blockSize >> > (padded_size, padded_idata_shared);
if (!caught) {
timer().endGpuTimer();
}
cudaMemcpy(odata, dev_odata, sizeof(int) * padded_size, cudaMemcpyDeviceToHost);
/*cudaFree(padded_idata_shared);
cudaFree(dev_odata);
cudaFree(dev_intermediate);
cudaFree(dev_intermediate_scan);*/
}
//void scan_device(int n, int *odata, const int *idata, int blockSize) {
// int padded_size = 1 << (ilog2ceil(n));
// cudaMalloc((void**)&padded_idata_shared, padded_size * sizeof(int));
// checkCUDAErrorWithLine("cudaMalloc padded_idata_shared failed!");
// cudaMemset(padded_idata_shared, 0, padded_size * sizeof(int));
// cudaMemcpy(padded_idata_shared, idata, sizeof(int) * n, cudaMemcpyDeviceToDevice);
// bool caught = false;
// try {
// timer().startGpuTimer();
// }
// catch (const std::exception& e) {
// caught = true;
// }
// int iterations = ilog2(padded_size);
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// bool optimized = false;
// //Up-Sweep
// if (optimized) {
// int number_of_threads = padded_size;
// for (int d = 0; d < iterations; d++) {
// number_of_threads /= 2;
// dim3 fullBlocksPerGridUpSweep((number_of_threads + blockSize - 1) / blockSize);
// upSweepOptimized << <fullBlocksPerGridUpSweep, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// else {
// for (int d = 0; d < iterations; d++) {
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// upSweep << <fullBlocksPerGrid, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// //Down-Sweep
// cudaMemset(padded_idata_shared + (padded_size - 1), 0, sizeof(int));
// if (optimized) {
// int number_of_threads = 1;
// for (int d = iterations - 1; d >= 0; d--) {
// dim3 fullBlocksPerGridDownSweep((number_of_threads + blockSize - 1) / blockSize);
// downSweepOptimized << <fullBlocksPerGridDownSweep, blockSize >> > (padded_size, d, padded_idata_shared);
// number_of_threads *= 2;
// }
// }
// else {
// for (int d = iterations - 1; d >= 0; d--) {
// dim3 fullBlocksPerGrid((padded_size + blockSize - 1) / blockSize);
// downSweep << <fullBlocksPerGrid, blockSize >> > (padded_size, d, padded_idata_shared);
// }
// }
// if (!caught) {
// timer().endGpuTimer();
// }
// cudaMemcpy(odata, padded_idata_shared, sizeof(int) * n, cudaMemcpyDeviceToDevice);
//}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata, int blockSize) {
cudaMalloc((void**)&dev_idata_shared, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata_shared failed!");
cudaMemcpy(dev_idata_shared, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
int* dev_bools;
cudaMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_bools failed!");
int* bools;
bools = (int*)malloc(n * sizeof(int));
int* indices;
indices = (int*)malloc(n * sizeof(int));
int *dev_indices;
cudaMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_bools failed!");
timer().startGpuTimer();
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_bools, dev_idata_shared);
cudaMemcpy(bools, dev_bools, sizeof(int) * n, cudaMemcpyDeviceToHost);
scan(n, indices, bools, blockSize);
int output_length = indices[n - 1] + bools[n - 1];
cudaMemcpy(dev_indices, indices, sizeof(int) * n, cudaMemcpyHostToDevice);
int *dev_odata;
cudaMalloc((void**)&dev_odata, output_length * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata_shared, dev_bools, dev_indices);
cudaMemcpy(odata, dev_odata, sizeof(int) * output_length, cudaMemcpyDeviceToHost);
timer().endGpuTimer();
return output_length;
}
}
}
|
6f9ce54d997bfb3b601f272c22fd3e65ff766596.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <time.h>
#include <helper_cuda.h>
#include <helper_functions.h>
__global__
void reco3d(int n, double *r, double *s,double *phi,double *z)
{
// printf("check %d\n",n);
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
// printf("%f %f %f %f\n",r[i],s[i],phi[i],z[i]);
double a=r[i]*cos(phi[i]);
}
}
__global__
void copy(int *n,int *dummy,int nslice)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<nslice){
dummy[i]=n[i];
printf("check %d %d\n",i,n[i]);
}
//blockIdx is a dim3 variable holding the index of the block
//threadIdx is a dim3 variable holding the index of the thread
}
int main(void ) {
time_t Start, Stop;
const int nslice=100;
int Nevents=1000000;
double xmin=-20;
double xmax=20;
double zmin=-25;
double zmax=25;
double ymin=-20;
double ymax=20;
double voxelsize=0.125;
int Nvoxelz=nslice;
double voxelsizez=(zmax-zmin)/Nvoxelz;
int Nvoxel=(xmax-xmin)/voxelsize;//512;
//double **hx=(double**)malloc(nslice*sizeof(double));
thrust::host_vector<double> hs[nslice];
thrust::host_vector<double> hr[nslice];
thrust::host_vector<double> hz[nslice];
thrust::host_vector<double> hphi[nslice];
int idxslice[nslice];
for(int j=0;j<nslice;j++){
idxslice[j]=0;
hr[j].reserve(5);
hs[j].reserve(5);
hz[j].reserve(5);
hphi[j].reserve(5);
}
const int Nmaxcoinc=10;
double ene1[Nmaxcoinc],ene2[Nmaxcoinc],deltat[Nmaxcoinc],x[Nmaxcoinc],y[Nmaxcoinc],z[Nmaxcoinc],sign[Nmaxcoinc],xhit1[Nmaxcoinc],yhit1[Nmaxcoinc],zhit1[Nmaxcoinc],xhit2[Nmaxcoinc],yhit2[Nmaxcoinc],zhit2[Nmaxcoinc],phi[Nmaxcoinc],theta[Nmaxcoinc];
int pad1[Nmaxcoinc],pad2[Nmaxcoinc],cell1[Nmaxcoinc],cell2[Nmaxcoinc],chip1[Nmaxcoinc],chip2[Nmaxcoinc],pix1[Nmaxcoinc],pix2[Nmaxcoinc],samecell1[Nmaxcoinc],sharedcharge1[Nmaxcoinc],samecell2[Nmaxcoinc],sharedcharge2[Nmaxcoinc],gammaID1[Nmaxcoinc],triggered,NEntries;
FILE *filein=std::fopen("./binary_10000_11000_0.bin","r");
std::fread(&NEntries,4,1,filein);
for(int i=0;i<Nevents;i++){
//std::cout<<NEntries<<std::endl;
if(i%1000==0)std::cout<<i<<std::endl;
std::fread(&(int&)triggered,4,1,filein);
//std::cout<<triggered<<std::endl;
for(int k=0;k<triggered;k++){
std::fread(&(double&)xhit1[k],8,1,filein);
std::fread(&(double&)xhit2[k],8,1,filein);
std::fread(&(double&)yhit1[k],8,1,filein);
std::fread(&(double&)yhit2[k],8,1,filein);
std::fread(&(double&)ene1[k],8,1,filein);
std::fread(&(double&)ene2[k],8,1,filein);
std::fread(&(int&)cell1[k],4,1,filein);
std::fread(&(int&)cell2[k],4,1,filein);
std::fread(&(int&)pad1[k],4,1,filein);
std::fread(&(int&)pad2[k],4,1,filein);
std::fread(&(int&)chip1[k],4,1,filein);
std::fread(&(int&)chip2[k],4,1,filein);
std::fread(&(int&)pix1[k],4,1,filein);
std::fread(&(int&)pix2[k],4,1,filein);
std::fread(&(double&)deltat[k],8,1,filein);
std::fread(&(double&)sign[k],8,1,filein); //for sinogram
std::fread(&(double&)x[k],8,1,filein);
std::fread(&(double&)y[k],8,1,filein);
std::fread(&(double&)z[k],8,1,filein);
std::fread(&(double&)phi[k],8,1,filein);
std::fread(&(double&)theta[k],8,1,filein);
double c=299.792458;//mm/ns
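      // Sinogram coordinate with a time-of-flight correction: project both hit
      // positions onto the axis for angle phi, take their midpoint, then shift it
      // along the line of response by c*deltat/2 (projected with sin(theta)) to
      // estimate the annihilation position used for the sinogram.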
for(int j=0;j<nslice;j++){
if(z[k]>zmin+j*voxelsizez && z[k]<zmin+(j+1)*voxelsizez){
double s1=xhit1[k]*cos(phi[k])+yhit1[k]*sin(phi[k]);
double s2=xhit2[k]*cos(phi[k])+yhit2[k]*sin(phi[k]);
double sm=(s1+s2)/2.;
double ssino=sm-(c*deltat[k]*0.5)*sin(theta[k]);
hz[j].push_back(z[k]);
hs[j].push_back(ssino);
hr[j].push_back(-sqrt(x[k]*x[k]+y[k]*y[k])*sign[k]);
hphi[j].push_back(phi[k]);
idxslice[j]++;
}
}
}
}
//end of data sorting
//copy to device
thrust::device_vector<double> ds[nslice];
thrust::device_vector<double> dr[nslice];
thrust::device_vector<double> dz[nslice];
thrust::device_vector<double> dphi[nslice];
hipEvent_t start;
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
for(int j=0;j<nslice;j++){
ds[j].reserve(idxslice[j]);
dr[j].reserve(idxslice[j]);
dz[j].reserve(idxslice[j]);
dphi[j].reserve(idxslice[j]);
ds[j]=hs[j];
dr[j]=hr[j];
dz[j]=hz[j];
dphi[j]=hphi[j];
}
checkCudaErrors(hipEventRecord(start, NULL)); //start
int Nthreads=1000;
double *ps[nslice],*pr[nslice],*pz[nslice],*pphi[nslice];
for(int j=0;j<nslice;j++){
ps[j]=thrust::raw_pointer_cast(ds[j].data());
pr[j]=thrust::raw_pointer_cast(dr[j].data());
pz[j]=thrust::raw_pointer_cast(dz[j].data());
pphi[j]=thrust::raw_pointer_cast(dphi[j].data());
hipLaunchKernelGGL(( reco3d), dim3((idxslice[j]+(Nthreads-1))/Nthreads),dim3(Nthreads), 0, 0, idxslice[j],ps[j],pr[j],pphi[j],pz[j]);
}
checkCudaErrors(hipEventRecord(stop, NULL));//stop
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
time(&Start);
checkCudaErrors(hipEventRecord(start, NULL)); //start
for(int j=0;j<nslice;j++){
for(int k=0;k<idxslice[j];k++)double a=hr[j][k]*cos(hphi[j][k]);
}
checkCudaErrors(hipEventRecord(stop, NULL));//stop
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal1 = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal1, start, stop));
time(&Stop);
printf("Processing time cpu: %d (sec)\n", Stop - Start);
printf("Processing time cpu: %f (msec)\n", msecTotal1);
printf("Processing time gpu: %f (msec)\n", msecTotal);
for(int j=0;j<nslice;j++){
hr[j].clear();
hs[j].clear();
hz[j].clear();
hphi[j].clear();
dr[j].clear();
ds[j].clear();
dz[j].clear();
dphi[j].clear();
}
}
| 6f9ce54d997bfb3b601f272c22fd3e65ff766596.cu | #include <stdio.h>
#include <iostream>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <time.h>
#include <helper_cuda.h>
#include <helper_functions.h>
__global__
void reco3d(int n, double *r, double *s,double *phi,double *z)
{
// printf("check %d\n",n);
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
// printf("%f %f %f %f\n",r[i],s[i],phi[i],z[i]);
double a=r[i]*cos(phi[i]);
}
}
__global__
void copy(int *n,int *dummy,int nslice)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<nslice){
dummy[i]=n[i];
printf("check %d %d\n",i,n[i]);
}
//blockIdx is a dim3 variable holding the index of the block
//threadIdx is a dim3 variable holding the index of the thread
}
int main(void ) {
time_t Start, Stop;
const int nslice=100;
int Nevents=1000000;
double xmin=-20;
double xmax=20;
double zmin=-25;
double zmax=25;
double ymin=-20;
double ymax=20;
double voxelsize=0.125;
int Nvoxelz=nslice;
double voxelsizez=(zmax-zmin)/Nvoxelz;
int Nvoxel=(xmax-xmin)/voxelsize;//512;
//double **hx=(double**)malloc(nslice*sizeof(double));
thrust::host_vector<double> hs[nslice];
thrust::host_vector<double> hr[nslice];
thrust::host_vector<double> hz[nslice];
thrust::host_vector<double> hphi[nslice];
int idxslice[nslice];
for(int j=0;j<nslice;j++){
idxslice[j]=0;
hr[j].reserve(5);
hs[j].reserve(5);
hz[j].reserve(5);
hphi[j].reserve(5);
}
const int Nmaxcoinc=10;
double ene1[Nmaxcoinc],ene2[Nmaxcoinc],deltat[Nmaxcoinc],x[Nmaxcoinc],y[Nmaxcoinc],z[Nmaxcoinc],sign[Nmaxcoinc],xhit1[Nmaxcoinc],yhit1[Nmaxcoinc],zhit1[Nmaxcoinc],xhit2[Nmaxcoinc],yhit2[Nmaxcoinc],zhit2[Nmaxcoinc],phi[Nmaxcoinc],theta[Nmaxcoinc];
int pad1[Nmaxcoinc],pad2[Nmaxcoinc],cell1[Nmaxcoinc],cell2[Nmaxcoinc],chip1[Nmaxcoinc],chip2[Nmaxcoinc],pix1[Nmaxcoinc],pix2[Nmaxcoinc],samecell1[Nmaxcoinc],sharedcharge1[Nmaxcoinc],samecell2[Nmaxcoinc],sharedcharge2[Nmaxcoinc],gammaID1[Nmaxcoinc],triggered,NEntries;
FILE *filein=std::fopen("./binary_10000_11000_0.bin","r");
std::fread(&NEntries,4,1,filein);
for(int i=0;i<Nevents;i++){
//std::cout<<NEntries<<std::endl;
if(i%1000==0)std::cout<<i<<std::endl;
std::fread(&(int&)triggered,4,1,filein);
//std::cout<<triggered<<std::endl;
for(int k=0;k<triggered;k++){
std::fread(&(double&)xhit1[k],8,1,filein);
std::fread(&(double&)xhit2[k],8,1,filein);
std::fread(&(double&)yhit1[k],8,1,filein);
std::fread(&(double&)yhit2[k],8,1,filein);
std::fread(&(double&)ene1[k],8,1,filein);
std::fread(&(double&)ene2[k],8,1,filein);
std::fread(&(int&)cell1[k],4,1,filein);
std::fread(&(int&)cell2[k],4,1,filein);
std::fread(&(int&)pad1[k],4,1,filein);
std::fread(&(int&)pad2[k],4,1,filein);
std::fread(&(int&)chip1[k],4,1,filein);
std::fread(&(int&)chip2[k],4,1,filein);
std::fread(&(int&)pix1[k],4,1,filein);
std::fread(&(int&)pix2[k],4,1,filein);
std::fread(&(double&)deltat[k],8,1,filein);
std::fread(&(double&)sign[k],8,1,filein); //for sinogram
std::fread(&(double&)x[k],8,1,filein);
std::fread(&(double&)y[k],8,1,filein);
std::fread(&(double&)z[k],8,1,filein);
std::fread(&(double&)phi[k],8,1,filein);
std::fread(&(double&)theta[k],8,1,filein);
double c=299.792458;//mm/ns
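      // Sinogram coordinate with a time-of-flight correction: project both hit
      // positions onto the axis for angle phi, take their midpoint, then shift it
      // along the line of response by c*deltat/2 (projected with sin(theta)) to
      // estimate the annihilation position used for the sinogram.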
for(int j=0;j<nslice;j++){
if(z[k]>zmin+j*voxelsizez && z[k]<zmin+(j+1)*voxelsizez){
double s1=xhit1[k]*cos(phi[k])+yhit1[k]*sin(phi[k]);
double s2=xhit2[k]*cos(phi[k])+yhit2[k]*sin(phi[k]);
double sm=(s1+s2)/2.;
double ssino=sm-(c*deltat[k]*0.5)*sin(theta[k]);
hz[j].push_back(z[k]);
hs[j].push_back(ssino);
hr[j].push_back(-sqrt(x[k]*x[k]+y[k]*y[k])*sign[k]);
hphi[j].push_back(phi[k]);
idxslice[j]++;
}
}
}
}
//end of data sorting
//copy to device
thrust::device_vector<double> ds[nslice];
thrust::device_vector<double> dr[nslice];
thrust::device_vector<double> dz[nslice];
thrust::device_vector<double> dphi[nslice];
cudaEvent_t start;
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
for(int j=0;j<nslice;j++){
ds[j].reserve(idxslice[j]);
dr[j].reserve(idxslice[j]);
dz[j].reserve(idxslice[j]);
dphi[j].reserve(idxslice[j]);
ds[j]=hs[j];
dr[j]=hr[j];
dz[j]=hz[j];
dphi[j]=hphi[j];
}
checkCudaErrors(cudaEventRecord(start, NULL)); //start
int Nthreads=1000;
double *ps[nslice],*pr[nslice],*pz[nslice],*pphi[nslice];
for(int j=0;j<nslice;j++){
ps[j]=thrust::raw_pointer_cast(ds[j].data());
pr[j]=thrust::raw_pointer_cast(dr[j].data());
pz[j]=thrust::raw_pointer_cast(dz[j].data());
pphi[j]=thrust::raw_pointer_cast(dphi[j].data());
reco3d<<<(idxslice[j]+(Nthreads-1))/Nthreads,Nthreads>>>(idxslice[j],ps[j],pr[j],pphi[j],pz[j]);
}
checkCudaErrors(cudaEventRecord(stop, NULL));//stop
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
time(&Start);
checkCudaErrors(cudaEventRecord(start, NULL)); //start
for(int j=0;j<nslice;j++){
for(int k=0;k<idxslice[j];k++)double a=hr[j][k]*cos(hphi[j][k]);
}
checkCudaErrors(cudaEventRecord(stop, NULL));//stop
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal1 = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal1, start, stop));
time(&Stop);
printf("Processing time cpu: %d (sec)\n", Stop - Start);
printf("Processing time cpu: %f (msec)\n", msecTotal1);
printf("Processing time gpu: %f (msec)\n", msecTotal);
for(int j=0;j<nslice;j++){
hr[j].clear();
hs[j].clear();
hz[j].clear();
hphi[j].clear();
dr[j].clear();
ds[j].clear();
dz[j].clear();
dphi[j].clear();
}
}
|
246a3059dcf64d27bbf843e023d88bc34c507c47.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------*/
/* ALL DEVICE MEMORY OPERATIONS */
/* IMPLEMENTATION */
/* (HIGH LEVEL) */
/*------------------------------*/
#include <iostream>
/*****************
* NAMESPACES *
* __________ *
*****************/
using namespace std;
using HostSpace = HostMemory<datatype>;
/*-------------*/
/* Constructor */
/*-------------*/
template < typename T>
DeviceMemory<T>::DeviceMemory(HostSpace& hostMemory)
: POINTERarray(DeviceMemory<T>::MAX_ELEMENTS + 1, 0), THRUSTarray(DeviceMemory<T>::MAX_ELEMENTS + 1) {
// Set up memory metrics first!
// Total used memory
this->consumed =
(Globals::rowsX * Globals::colsX) +
(Globals::rowsD * Globals::colsD) +
(Globals::colsD) +
(Globals::colsX) +
(Globals::rowsD * Globals::colsD * 1024) +
(Globals::colsD * Globals::colsD) +
(Globals::colsD * Globals::colsX) +
(Globals::colsD * Globals::colsX) +
(Tdata * Globals::colsX) +
(Globals::colsD * Globals::colsX) +
(Globals::colsD * Globals::rowsX) +
(Globals::rowsD * Globals::colsD) +
(Globals::rowsX * Globals::colsX) +
(Globals::colsX) +
(Globals::colsD) +
(1) +
(Globals::colsX);
this->consumed = this->consumed * sizeof(datatype);
// Total available global memory
size_t free_byte;
size_t total_byte;
hipMemGetInfo(&free_byte, &total_byte);
this->free = (double)free_byte;
this->total = (double)total_byte;
this->used = this->total - this->free;
// Now the arrays!
// X_ARRAY (rowsX * colsX)
this->THRUSTarray[X_ARRAY] = thrust::device_vector<T>(
hostMemory.get(HostSpace::X_ARRAY),
hostMemory.get(HostSpace::X_ARRAY) + Globals::rowsX*Globals::colsX);
this->POINTERarray[X_ARRAY] = thrust::raw_pointer_cast(&(this->THRUSTarray[X_ARRAY])[0]);
/*_______________*/
/* More arrays */
/*_______________*/
// TEMP_ROWSD_BY_COLSD
this->THRUSTarray[TEMP_ROWSD_BY_COLSD] = thrust::device_vector<T>(Globals::rowsD*Globals::colsD);
this->POINTERarray[TEMP_ROWSD_BY_COLSD] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_ROWSD_BY_COLSD])[0]);
// TEMP_1_BY_COLSD
this->THRUSTarray[TEMP_1_BY_COLSD] = thrust::device_vector<T>(Globals::colsD);
this->POINTERarray[TEMP_1_BY_COLSD] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_1_BY_COLSD])[0]);
// TEMP_SINGLE_VALUE ( 1 !by colsX threads )
this->THRUSTarray[TEMP_SINGLE_VALUE] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[TEMP_SINGLE_VALUE] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_SINGLE_VALUE])[0]);
// REDUCTION_BUFFER ( rowsD * colsD * 1024 )
// The maximum buffer needed! ( in collincomb )
this->THRUSTarray[REDUCTION_BUFFER] = thrust::device_vector<T>(Globals::colsD*Globals::rowsD*1024);
this->POINTERarray[REDUCTION_BUFFER] = thrust::raw_pointer_cast(
&(this->THRUSTarray[REDUCTION_BUFFER])[0]);
// G ( colsD * colsD)
this->THRUSTarray[G] = thrust::device_vector<T>(Globals::colsD*Globals::colsD);
this->POINTERarray[G] = thrust::raw_pointer_cast(
&(this->THRUSTarray[G])[0]);
// DtX ( colsD * colsX)
this->THRUSTarray[DtX] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[DtX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[DtX])[0]);
hipMemset(this->POINTERarray[DtX], 0, sizeof(T) * Globals::colsD*Globals::colsX);
// SELECTED_ATOMS ( colsD !by colsX threads )
this->THRUSTarray[SELECTED_ATOMS] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[SELECTED_ATOMS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[SELECTED_ATOMS])[0]);
hipMemset(this->POINTERarray[SELECTED_ATOMS], 0, sizeof(T) * Globals::colsD*Globals::colsX);
// c ( Tdata !by colsX threads )
this->THRUSTarray[c] = thrust::device_vector<T>(Tdata*Globals::colsX);
this->POINTERarray[c] = thrust::raw_pointer_cast(
&(this->THRUSTarray[c])[0]);
hipMemset(this->POINTERarray[c], 0, sizeof(T) * Tdata*Globals::colsX);
// alpha ( colsD !by colsX threads )
this->THRUSTarray[alpha] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[alpha] = thrust::raw_pointer_cast(
&(this->THRUSTarray[alpha])[0]);
// TEMP_COLSD_BY_ROWSX ( colsD * rowsX )
this->THRUSTarray[TEMP_COLSD_BY_ROWSX] = thrust::device_vector<T>(Globals::colsD*Globals::rowsX);
this->POINTERarray[TEMP_COLSD_BY_ROWSX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_COLSD_BY_ROWSX])[0]);
// TEMP_ROWSX_BY_COLSX ( rowsX * colsX )
this->THRUSTarray[TEMP_ROWSX_BY_COLSX] = thrust::device_vector<T>(Globals::rowsX*Globals::colsX);
this->POINTERarray[TEMP_ROWSX_BY_COLSX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_ROWSX_BY_COLSX])[0]);
// ERR ( 1 * colsX )
this->THRUSTarray[ERR] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[ERR] = thrust::raw_pointer_cast(
&(this->THRUSTarray[ERR])[0]);
// UNUSED_SIGS ( 1 * colsX )
this->THRUSTarray[UNUSED_SIGS] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[UNUSED_SIGS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS])[0]);
// REPLACED_ATOMS ( 1 * colsD)
this->THRUSTarray[REPLACED_ATOMS] = thrust::device_vector<T>(Globals::colsD);
this->POINTERarray[REPLACED_ATOMS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[REPLACED_ATOMS])[0]);
// UNUSED_SIGS_COUNTER ( 1 )
this->THRUSTarray[UNUSED_SIGS_COUNTER] = thrust::device_vector<T>(1);
this->POINTERarray[UNUSED_SIGS_COUNTER] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS_COUNTER])[0]);
// UNUSED_SIGS_BITMAP ( 1 * colsX )
this->THRUSTarray[UNUSED_SIGS_BITMAP] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[UNUSED_SIGS_BITMAP] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS_BITMAP])[0]);
/*_______________*/
/* End of arrays */
/*_______________*/
// D_ARRAY
this->THRUSTarray[D_ARRAY] = thrust::device_vector<T>(
hostMemory.get(HostSpace::D_ARRAY),
hostMemory.get(HostSpace::D_ARRAY) + Globals::rowsD*Globals::colsD);
this->POINTERarray[D_ARRAY] = thrust::raw_pointer_cast(&(this->THRUSTarray[D_ARRAY])[0]);
}
/*------------*/
/* Destructor */
/*------------*/
template < typename T>
DeviceMemory<T>::~DeviceMemory() {
cout << "# Device Memory Cleared!" << endl;
}
/*----------------*/
/* Public Methods */
/*----------------*/
// Return an array pointer
template < typename T> inline
T* DeviceMemory<T>::get(unsigned int index) {
return this->POINTERarray.at(index);
}
// Return a thrust device array pointer
template < typename T> inline
thrust::device_vector<T>&
DeviceMemory<T>::getThrust(unsigned int index) {
return this->THRUSTarray.at(index);
}
// Return total available device memory
template < typename T> inline
double DeviceMemory<T>::getTotal() {
return this->total;
}
// Return total memory used by the OS
template < typename T> inline
double DeviceMemory<T>::getUsed() {
return this->used;
}
// Return free global memory
template < typename T> inline
double DeviceMemory<T>::getFree() {
return this->free;
}
// Return total memory used by our program
template < typename T> inline
double DeviceMemory<T>::getConsumed() {
return this->consumed;
} | 246a3059dcf64d27bbf843e023d88bc34c507c47.cu | /*------------------------------*/
/* ALL DEVICE MEMORY OPERATIONS */
/* IMPLEMENTATION */
/* (HIGH LEVEL) */
/*------------------------------*/
#include <iostream>
/*****************
* NAMESPACES *
* __________ *
*****************/
using namespace std;
using HostSpace = HostMemory<datatype>;
/*-------------*/
/* Constructor */
/*-------------*/
template < typename T>
DeviceMemory<T>::DeviceMemory(HostSpace& hostMemory)
: POINTERarray(DeviceMemory<T>::MAX_ELEMENTS + 1, 0), THRUSTarray(DeviceMemory<T>::MAX_ELEMENTS + 1) {
// Set up memory metrics first!
// Total used memory
this->consumed =
(Globals::rowsX * Globals::colsX) +
(Globals::rowsD * Globals::colsD) +
(Globals::colsD) +
(Globals::colsX) +
(Globals::rowsD * Globals::colsD * 1024) +
(Globals::colsD * Globals::colsD) +
(Globals::colsD * Globals::colsX) +
(Globals::colsD * Globals::colsX) +
(Tdata * Globals::colsX) +
(Globals::colsD * Globals::colsX) +
(Globals::colsD * Globals::rowsX) +
(Globals::rowsD * Globals::colsD) +
(Globals::rowsX * Globals::colsX) +
(Globals::colsX) +
(Globals::colsD) +
(1) +
(Globals::colsX);
this->consumed = this->consumed * sizeof(datatype);
// Total available global memory
size_t free_byte;
size_t total_byte;
cudaMemGetInfo(&free_byte, &total_byte);
this->free = (double)free_byte;
this->total = (double)total_byte;
this->used = this->total - this->free;
// Now the arrays!
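  // Allocation pattern for every entry below: back each logical buffer with a
  // thrust::device_vector, then cache a raw device pointer to its storage via
  // thrust::raw_pointer_cast so plain CUDA kernels can consume it.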
// X_ARRAY (rowsX * colsX)
this->THRUSTarray[X_ARRAY] = thrust::device_vector<T>(
hostMemory.get(HostSpace::X_ARRAY),
hostMemory.get(HostSpace::X_ARRAY) + Globals::rowsX*Globals::colsX);
this->POINTERarray[X_ARRAY] = thrust::raw_pointer_cast(&(this->THRUSTarray[X_ARRAY])[0]);
/*_______________*/
/* More arrays */
/*_______________*/
// TEMP_ROWSD_BY_COLSD
this->THRUSTarray[TEMP_ROWSD_BY_COLSD] = thrust::device_vector<T>(Globals::rowsD*Globals::colsD);
this->POINTERarray[TEMP_ROWSD_BY_COLSD] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_ROWSD_BY_COLSD])[0]);
// TEMP_1_BY_COLSD
this->THRUSTarray[TEMP_1_BY_COLSD] = thrust::device_vector<T>(Globals::colsD);
this->POINTERarray[TEMP_1_BY_COLSD] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_1_BY_COLSD])[0]);
// TEMP_SINGLE_VALUE ( 1 !by colsX threads )
this->THRUSTarray[TEMP_SINGLE_VALUE] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[TEMP_SINGLE_VALUE] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_SINGLE_VALUE])[0]);
// REDUCTION_BUFFER ( rowsD * colsD * 1024 )
// The maximum buffer needed! ( in collincomb )
this->THRUSTarray[REDUCTION_BUFFER] = thrust::device_vector<T>(Globals::colsD*Globals::rowsD*1024);
this->POINTERarray[REDUCTION_BUFFER] = thrust::raw_pointer_cast(
&(this->THRUSTarray[REDUCTION_BUFFER])[0]);
// G ( colsD * colsD)
this->THRUSTarray[G] = thrust::device_vector<T>(Globals::colsD*Globals::colsD);
this->POINTERarray[G] = thrust::raw_pointer_cast(
&(this->THRUSTarray[G])[0]);
// DtX ( colsD * colsX)
this->THRUSTarray[DtX] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[DtX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[DtX])[0]);
cudaMemset(this->POINTERarray[DtX], 0, Globals::colsD*Globals::colsX);
// SELECTED_ATOMS ( colsD !by colsX threads )
this->THRUSTarray[SELECTED_ATOMS] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[SELECTED_ATOMS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[SELECTED_ATOMS])[0]);
cudaMemset(this->POINTERarray[SELECTED_ATOMS], 0, Globals::colsD*Globals::colsX);
// c ( Tdata !by colsX threads )
this->THRUSTarray[c] = thrust::device_vector<T>(Tdata*Globals::colsX);
this->POINTERarray[c] = thrust::raw_pointer_cast(
&(this->THRUSTarray[c])[0]);
cudaMemset(this->POINTERarray[c], 0, Tdata*Globals::colsX);
// alpha ( colsD !by colsX threads )
this->THRUSTarray[alpha] = thrust::device_vector<T>(Globals::colsD*Globals::colsX);
this->POINTERarray[alpha] = thrust::raw_pointer_cast(
&(this->THRUSTarray[alpha])[0]);
// TEMP_COLSD_BY_ROWSX ( colsD * rowsX )
this->THRUSTarray[TEMP_COLSD_BY_ROWSX] = thrust::device_vector<T>(Globals::colsD*Globals::rowsX);
this->POINTERarray[TEMP_COLSD_BY_ROWSX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_COLSD_BY_ROWSX])[0]);
// TEMP_ROWSX_BY_COLSX ( rowsX * colsX )
this->THRUSTarray[TEMP_ROWSX_BY_COLSX] = thrust::device_vector<T>(Globals::rowsX*Globals::colsX);
this->POINTERarray[TEMP_ROWSX_BY_COLSX] = thrust::raw_pointer_cast(
&(this->THRUSTarray[TEMP_ROWSX_BY_COLSX])[0]);
// ERR ( 1 * colsX )
this->THRUSTarray[ERR] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[ERR] = thrust::raw_pointer_cast(
&(this->THRUSTarray[ERR])[0]);
// UNUSED_SIGS ( 1 * colsX )
this->THRUSTarray[UNUSED_SIGS] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[UNUSED_SIGS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS])[0]);
// REPLACED_ATOMS ( 1 * colsD)
this->THRUSTarray[REPLACED_ATOMS] = thrust::device_vector<T>(Globals::colsD);
this->POINTERarray[REPLACED_ATOMS] = thrust::raw_pointer_cast(
&(this->THRUSTarray[REPLACED_ATOMS])[0]);
// UNUSED_SIGS_COUNTER ( 1 )
this->THRUSTarray[UNUSED_SIGS_COUNTER] = thrust::device_vector<T>(1);
this->POINTERarray[UNUSED_SIGS_COUNTER] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS_COUNTER])[0]);
// UNUSED_SIGS_BITMAP ( 1 * colsX )
this->THRUSTarray[UNUSED_SIGS_BITMAP] = thrust::device_vector<T>(Globals::colsX);
this->POINTERarray[UNUSED_SIGS_BITMAP] = thrust::raw_pointer_cast(
&(this->THRUSTarray[UNUSED_SIGS_BITMAP])[0]);
/*_______________*/
/* End of arrays */
/*_______________*/
// D_ARRAY
this->THRUSTarray[D_ARRAY] = thrust::device_vector<T>(
hostMemory.get(HostSpace::D_ARRAY),
hostMemory.get(HostSpace::D_ARRAY) + Globals::rowsD*Globals::colsD);
this->POINTERarray[D_ARRAY] = thrust::raw_pointer_cast(&(this->THRUSTarray[D_ARRAY])[0]);
}
/*------------*/
/* Destructor */
/*------------*/
template < typename T>
DeviceMemory<T>::~DeviceMemory() {
cout << "# Device Memory Cleared!" << endl;
}
/*----------------*/
/* Public Methods */
/*----------------*/
// Return an array pointer
template < typename T> inline
T* DeviceMemory<T>::get(unsigned int index) {
return this->POINTERarray.at(index);
}
// Return a thrust device array pointer
template < typename T> inline
thrust::device_vector<T>&
DeviceMemory<T>::getThrust(unsigned int index) {
return this->THRUSTarray.at(index);
}
// Return total available device memory
template < typename T> inline
double DeviceMemory<T>::getTotal() {
return this->total;
}
// Return total memory used by the OS
template < typename T> inline
double DeviceMemory<T>::getUsed() {
return this->used;
}
// Return free global memory
template < typename T> inline
double DeviceMemory<T>::getFree() {
return this->free;
}
// Return total memory used by our program
template < typename T> inline
double DeviceMemory<T>::getConsumed() {
return this->consumed;
} |
b8f686c90dcc44a0314dcf323e19cc32c8938c9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include "hip/hip_runtime.h"
#include "stdio.h"
__global__ void gpuRecursiveReduce2(int *g_idata, int *g_odata, int iStride, int const iDim){
int *idata = g_idata + blockIdx.x*iDim;
// stop condition
if(iStride == 1 && threadIdx.x == 0){
g_odata[blockIdx.x] = idata[0]+idata[1];
return;
}
// in place reduction
idata[threadIdx.x] += idata[threadIdx.x + iStride];
// nested invocation to generate child grids
if(threadIdx.x == 0 && blockIdx.x == 0){
hipLaunchKernelGGL(( gpuRecursiveReduce2), dim3(gridDim.x), dim3(iStride/2), 0, 0, g_idata, g_odata, iStride/2, iDim);
}
}
// Recursive Implementation of Interleaved Pair Approach
int cpuRecursiveReduce(int *data, int const size)
{
// stop condition
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
// call recursively
return cpuRecursiveReduce(data, stride);
}
// Neighbored Pair Implementation with divergence
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void gpuRecursiveReduce (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invocation
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
// in place reduction
idata[tid] += idata[tid + istride];
}
// sync at block level
__syncthreads();
// nested invocation to generate child grids
if(tid == 0)
{
hipLaunchKernelGGL(( gpuRecursiveReduce), dim3(1), dim3(istride), 0, 0, idata, odata, istride);
// sync all child grids launched in this block
hipDeviceSynchronize();
}
// sync at block level again
__syncthreads();
}
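// Nested reduction without an explicit device synchronization after the child
// launch; child grids still complete before the parent grid finishes.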
__global__ void gpuRecursiveReduceNosync (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invoke
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride];
if(tid == 0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(1), dim3(istride), 0, 0, idata, odata, istride);
}
}
}
// main from here
int main(int argc, char **argv)
{
// set up device
int dev = 0, gpu_sum;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// set up execution configuration
int nblock = 2048;
int nthread = 512; // initial block size
if(argc > 1)
{
        nblock = atoi(argv[1]); // number of blocks from command line argument
}
if(argc > 2)
{
nthread = atoi(argv[2]); // block size from command line argument
}
int size = nblock * nthread; // total number of elements to reduceNeighbored
dim3 block (nthread, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("array %d grid %d block %d\n", size, grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
h_idata[i] = 1;
}
memcpy (tmp, h_idata, bytes);
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
double iStart, iElaps;
// cpu recursive reduction
iStart = cpuSecond();
int cpu_sum = cpuRecursiveReduce (tmp, size);
iElaps = cpuSecond() - iStart;
printf("cpu reduce\t\telapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// gpu reduceNeighbored
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = cpuSecond();
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuRecursiveReduce), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, block.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel without synchronization
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(grid), dim3(block.x), 0, 0, d_idata, d_odata, block.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nestedNosyn\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuRecursiveReduce2), dim3(grid), dim3(block.x / 2), 0, 0, d_idata, d_odata, block.x / 2,
block.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested2\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| b8f686c90dcc44a0314dcf323e19cc32c8938c9f.cu | #include "../common/common.h"
#include "cuda_runtime.h"
#include "stdio.h"
__global__ void gpuRecursiveReduce2(int *g_idata, int *g_odata, int iStride, int const iDim){
int *idata = g_idata + blockIdx.x*iDim;
// stop condition
if(iStride == 1 && threadIdx.x == 0){
g_odata[blockIdx.x] = idata[0]+idata[1];
return;
}
// in place reduction
idata[threadIdx.x] += idata[threadIdx.x + iStride];
// nested invocation to generate child grids
if(threadIdx.x == 0 && blockIdx.x == 0){
gpuRecursiveReduce2<<<gridDim.x, iStride/2>>>(g_idata, g_odata, iStride/2, iDim);
}
}
// Recursive Implementation of Interleaved Pair Approach
int cpuRecursiveReduce(int *data, int const size)
{
// stop condition
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
// call recursively
return cpuRecursiveReduce(data, stride);
}
// Neighbored Pair Implementation with divergence
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void gpuRecursiveReduce (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invocation
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
// in place reduction
idata[tid] += idata[tid + istride];
}
// sync at block level
__syncthreads();
// nested invocation to generate child grids
if(tid == 0)
{
gpuRecursiveReduce<<<1, istride>>>(idata, odata, istride);
// sync all child grids launched in this block
cudaDeviceSynchronize();
}
// sync at block level again
__syncthreads();
}
__global__ void gpuRecursiveReduceNosync (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invoke
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride];
if(tid == 0)
{
gpuRecursiveReduceNosync<<<1, istride>>>(idata, odata, istride);
}
}
}
// main from here
int main(int argc, char **argv)
{
// set up device
int dev = 0, gpu_sum;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// set up execution configuration
int nblock = 2048;
int nthread = 512; // initial block size
if(argc > 1)
{
        nblock = atoi(argv[1]); // number of blocks from command line argument
}
if(argc > 2)
{
nthread = atoi(argv[2]); // block size from command line argument
}
int size = nblock * nthread; // total number of elements to reduceNeighbored
dim3 block (nthread, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("array %d grid %d block %d\n", size, grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
h_idata[i] = 1;
}
memcpy (tmp, h_idata, bytes);
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
double iStart, iElaps;
// cpu recursive reduction
iStart = cpuSecond();
int cpu_sum = cpuRecursiveReduce (tmp, size);
iElaps = cpuSecond() - iStart;
printf("cpu reduce\t\telapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// gpu reduceNeighbored
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = cpuSecond();
reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = cpuSecond();
gpuRecursiveReduce<<<grid, block>>>(d_idata, d_odata, block.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel without synchronization
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = cpuSecond();
gpuRecursiveReduceNosync<<<grid, block.x>>>(d_idata, d_odata, block.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nestedNosyn\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = cpuSecond();
gpuRecursiveReduce2<<<grid, block.x / 2>>>(d_idata, d_odata, block.x / 2,
block.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested2\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
52600a0b751ae3da6e211e1f70e89880a060bddd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_DEVICE void averagingKernel(void **vdx, void *vdz, int n, sd::LongType length, bool propagate) {
auto dx = reinterpret_cast<T **>(vdx);
auto dz = reinterpret_cast<T *>(vdz);
__shared__ T *shmem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char sharedmem[];
shmem = (T *)sharedmem;
}
__syncthreads();
  // each block cycles over its own part of the arrays
for (int r = blockDim.x * blockIdx.x; r < length; r += blockDim.x * gridDim.x) {
shmem[threadIdx.x] = (T)0.0f;
sd::LongType baseIdx = r;
// aggregation step, we roll over all arrays
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *)dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length) shmem[threadIdx.x] += cdata[threadIdx.x];
}
// average data in shared memory
if (baseIdx + threadIdx.x < length) shmem[threadIdx.x] /= n;
// div step & write out step
if (dz != nullptr) {
T *wdata = dz + baseIdx;
if (baseIdx + threadIdx.x < length) {
wdata[threadIdx.x] = shmem[threadIdx.x];
}
}
// propagate averaged data to all arrays
if (propagate)
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *)dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length) cdata[threadIdx.x] = shmem[threadIdx.x];
}
}
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execAveragingKernel(void **vdx, void *vdz, int n, sd::LongType length, bool propagate) {
averagingKernel<T>(vdx, vdz, n, length, propagate);
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void averagingKernelGeneric(dim3 &launchDims, hipStream_t *stream, void **vdx, void *vdz, int n,
sd::LongType length, bool propagate) {
hipLaunchKernelGGL(( execAveragingKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vdx, vdz, n, length, propagate);
sd::DebugHelper::checkErrorCode(stream, "averaging(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void averagingKernelGeneric,
(dim3 & launchDims, hipStream_t *stream, void **vdx, void *vdz, int n, sd::LongType length,
bool propagate),
SD_COMMON_TYPES);
} // namespace sd
| 52600a0b751ae3da6e211e1f70e89880a060bddd.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_DEVICE void averagingKernel(void **vdx, void *vdz, int n, sd::LongType length, bool propagate) {
auto dx = reinterpret_cast<T **>(vdx);
auto dz = reinterpret_cast<T *>(vdz);
__shared__ T *shmem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char sharedmem[];
shmem = (T *)sharedmem;
}
__syncthreads();
  // each block cycles over its own part of the arrays
for (int r = blockDim.x * blockIdx.x; r < length; r += blockDim.x * gridDim.x) {
shmem[threadIdx.x] = (T)0.0f;
sd::LongType baseIdx = r;
// aggregation step, we roll over all arrays
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *)dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length) shmem[threadIdx.x] += cdata[threadIdx.x];
}
// average data in shared memory
if (baseIdx + threadIdx.x < length) shmem[threadIdx.x] /= n;
// div step & write out step
if (dz != nullptr) {
T *wdata = dz + baseIdx;
if (baseIdx + threadIdx.x < length) {
wdata[threadIdx.x] = shmem[threadIdx.x];
}
}
// propagate averaged data to all arrays
if (propagate)
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *)dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length) cdata[threadIdx.x] = shmem[threadIdx.x];
}
}
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execAveragingKernel(void **vdx, void *vdz, int n, sd::LongType length, bool propagate) {
averagingKernel<T>(vdx, vdz, n, length, propagate);
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void averagingKernelGeneric(dim3 &launchDims, cudaStream_t *stream, void **vdx, void *vdz, int n,
sd::LongType length, bool propagate) {
execAveragingKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vdx, vdz, n, length, propagate);
sd::DebugHelper::checkErrorCode(stream, "averaging(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void averagingKernelGeneric,
(dim3 & launchDims, cudaStream_t *stream, void **vdx, void *vdz, int n, sd::LongType length,
bool propagate),
SD_COMMON_TYPES);
} // namespace sd
|
920801b4bda983df86d41945cdb42fd9e97f5e84.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/completenessScore.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct completenessParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<completenessParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(truthClusterArray, nElements, true);
raft::allocate(predClusterArray, nElements, true);
raft::update_device(truthClusterArray, &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calculating the golden output
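    //golden value used for the check: completeness = MI(truth, pred) / H(pred),
    //defined as 1.0 when H(pred) is zero or when the input is empty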
double truthMI, truthEntropy;
truthMI = MLCommon::Metrics::mutualInfoScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthEntropy =
MLCommon::Metrics::entropy(predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
if (truthEntropy) {
truthCompleteness = truthMI / truthEntropy;
} else
truthCompleteness = 1.0;
if (nElements == 0) truthCompleteness = 1.0;
//calling the completeness CUDA implementation
computedCompleteness = MLCommon::Metrics::completenessScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(truthClusterArray));
CUDA_CHECK(hipFree(predClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
completenessParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthCompleteness = 0;
double computedCompleteness = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<completenessParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
//writing the test suite
typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result) {
ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 920801b4bda983df86d41945cdb42fd9e97f5e84.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/completenessScore.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct completenessParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<completenessParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(truthClusterArray, nElements, true);
raft::allocate(predClusterArray, nElements, true);
raft::update_device(truthClusterArray, &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calculating the golden output
double truthMI, truthEntropy;
truthMI = MLCommon::Metrics::mutualInfoScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthEntropy =
MLCommon::Metrics::entropy(predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
if (truthEntropy) {
truthCompleteness = truthMI / truthEntropy;
} else
truthCompleteness = 1.0;
if (nElements == 0) truthCompleteness = 1.0;
//calling the completeness CUDA implementation
computedCompleteness = MLCommon::Metrics::completenessScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(truthClusterArray));
CUDA_CHECK(cudaFree(predClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
completenessParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthCompleteness = 0;
double computedCompleteness = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<completenessParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
//writing the test suite
typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result) {
ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
8e2b156af1878047d1edf7b76da86dc781bb81b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Shady Boukhary
* Midwestern State University
* CMPS 4563 - Parallel Distributed Computing - GPU Programming
* February 26th, 2018
*
*
 * CUDA parallel code that computes the product of two N x N matrices (N = 8192 here)
 * using a grid of (N/32) x (N/32) thread blocks of 32x32 = 1024 threads each. The
 * multiplication is timed and the elapsed time is written to an output file
 * (printing the result matrix is available but commented out). The GPU code uses
 * shared-memory tiling to speed up performance
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+*/
#include <stdio.h>
#include "timer.h"
#define TILE 32
const int N = 8192;
/*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
 * matrixMulKernel()
* GPU (device) kernel
 * @param: float *, float *, float *, int
* @return: void
* Description: multiplies 2 matrices and stores result in 3rd matrix using shared mem
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-*/
__global__ void matrixMulKernel(float * A_d, float * B_d, float * C_d, int width) {
__shared__ int Ads[TILE][TILE];
__shared__ int Bds[TILE][TILE];
int tx = threadIdx.x + TILE * blockIdx.x;
int ty = threadIdx.y + TILE * blockIdx.y;
float PValue = 0;
for (int m = 0; m < width/TILE; m++) {
// load A_d and B_d tiles into shared memory
Ads[threadIdx.y][threadIdx.x] = A_d[ty * width + (m * TILE + threadIdx.x)];
Bds[threadIdx.y][threadIdx.x]= B_d[(m * TILE + threadIdx.y) * width + tx];
__syncthreads();
for (int k = 0; k < TILE; k++) {
PValue += Ads[threadIdx.y][k] * Bds[k][threadIdx.x];
}
__syncthreads();
}
C_d[ty * width + tx] = PValue;
}
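/* Launch requirements (as used in main below): width must be a multiple of TILE,
   with a (width/TILE) x (width/TILE) grid of (TILE, TILE) thread blocks. */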
void printMatrix(float* , FILE*);
int main() {
FILE *outfile;
double timeStart, timeStop, timeElapsed;
printf("%c",'s');
float *A = (float *)malloc(N * N * sizeof(float));
float *B = (float *)malloc(N * N * sizeof(float));
float *C = (float *)malloc(N * N * sizeof(float));
int size = N * N * sizeof(float);
float * A_d;
float * B_d;
float * C_d;
float h = 0;
for (int x = 0; x < N; x++) {
for (int y = 0; y < N; y++){
A[x * N + y] = ++h;
B[x * N + y] = N * N - h;
C[x * N + y] = 0;
}
}
outfile = fopen("ShadyBoukhary1BOutputS.txt", "w");
if (outfile == NULL) {
printf("%s", "Failed to open file.\n");
exit(1);
}
// transfer A, B to device
hipMalloc((void **) &A_d, size);
hipMemcpy(A_d, A, size, hipMemcpyHostToDevice);
hipMalloc((void **) &B_d, size);
hipMemcpy(B_d, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &C_d, size);
	// grid of (N/32) x (N/32) blocks, each with 32x32 = 1024 threads
dim3 dimGrid0(N / 32, N / 32 , 1);
dim3 blockDim0(32, 32, 1);
printf("%c",'s');
GET_TIME(timeStart);
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid0), dim3(blockDim0), 0, 0, A_d, B_d, C_d, N);
hipDeviceSynchronize();
GET_TIME(timeStop);
timeElapsed = timeStop - timeStart;
//printMatrix(C, outfile);
hipMemcpy(C, C_d, size, hipMemcpyDeviceToHost);
//printMatrix(C, outfile);
fprintf(outfile, "The code to be timed took %e seconds\n", timeElapsed);
// free memory from device
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
}
/*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
* printMatrix()
 * @param: float *, FILE*
* @return: void
* Description: prints matrix to an output file
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-*/
void printMatrix(float * matrix, FILE* outfile){
for (int x = 0; x < N; x++) {
for (int y = 0; y < N; y++){
fprintf(outfile, "%5f%s", matrix[x * N + y], " ");
}
fprintf(outfile, "\n");
}
fprintf(outfile, "\n");
}
| 8e2b156af1878047d1edf7b76da86dc781bb81b2.cu | /*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Shady Boukhary
* Midwestern State University
* CMPS 4563 - Parallel Distributed Computing - GPU Programming
* February 26th, 2018
*
*
 * CUDA parallel code that computes the product of two N x N matrices (N = 8192 here)
 * using a grid of (N/32) x (N/32) thread blocks of 32x32 = 1024 threads each. The
 * multiplication is timed and the elapsed time is written to an output file
 * (printing the result matrix is available but commented out). The GPU code uses
 * shared-memory tiling to speed up performance
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+*/
#include <stdio.h>
#include "timer.h"
#define TILE 32
const int N = 8192;
/*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
 * matrixMulKernel()
* GPU (device) kernel
 * @param: float *, float *, float *, int
* @return: void
* Description: multiplies 2 matrices and stores result in 3rd matrix using shared mem
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-*/
__global__ void matrixMulKernel(float * A_d, float * B_d, float * C_d, int width) {
__shared__ int Ads[TILE][TILE];
__shared__ int Bds[TILE][TILE];
int tx = threadIdx.x + TILE * blockIdx.x;
int ty = threadIdx.y + TILE * blockIdx.y;
float PValue = 0;
for (int m = 0; m < width/TILE; m++) {
// load A_d and B_d tiles into shared memory
Ads[threadIdx.y][threadIdx.x] = A_d[ty * width + (m * TILE + threadIdx.x)];
Bds[threadIdx.y][threadIdx.x]= B_d[(m * TILE + threadIdx.y) * width + tx];
__syncthreads();
for (int k = 0; k < TILE; k++) {
PValue += Ads[threadIdx.y][k] * Bds[k][threadIdx.x];
}
__syncthreads();
}
C_d[ty * width + tx] = PValue;
}
void printMatrix(float* , FILE*);
int main() {
FILE *outfile;
double timeStart, timeStop, timeElapsed;
printf("%c",'s');
float *A = (float *)malloc(N * N * sizeof(float));
float *B = (float *)malloc(N * N * sizeof(float));
float *C = (float *)malloc(N * N * sizeof(float));
int size = N * N * sizeof(float);
float * A_d;
float * B_d;
float * C_d;
float h = 0;
for (int x = 0; x < N; x++) {
for (int y = 0; y < N; y++){
A[x * N + y] = ++h;
B[x * N + y] = N * N - h;
C[x * N + y] = 0;
}
}
outfile = fopen("ShadyBoukhary1BOutputS.txt", "w");
if (outfile == NULL) {
printf("%s", "Failed to open file.\n");
exit(1);
}
// transfer A, B to device
cudaMalloc((void **) &A_d, size);
cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &B_d, size);
cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &C_d, size);
	// grid of (N/32) x (N/32) blocks, each with 32x32 = 1024 threads
dim3 dimGrid0(N / 32, N / 32 , 1);
dim3 blockDim0(32, 32, 1);
printf("%c",'s');
GET_TIME(timeStart);
matrixMulKernel<<<dimGrid0, blockDim0>>>(A_d, B_d, C_d, N);
cudaDeviceSynchronize();
GET_TIME(timeStop);
timeElapsed = timeStop - timeStart;
//printMatrix(C, outfile);
cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
//printMatrix(C, outfile);
fprintf(outfile, "The code to be timed took %e seconds\n", timeElapsed);
// free memory from device
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
/*+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
* printMatrix()
 * @param: float *, FILE*
* @return: void
* Description: prints matrix to an output file
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-*/
void printMatrix(float * matrix, FILE* outfile){
for (int x = 0; x < N; x++) {
for (int y = 0; y < N; y++){
fprintf(outfile, "%5f%s", matrix[x * N + y], " ");
}
fprintf(outfile, "\n");
}
fprintf(outfile, "\n");
}
|
98ae64b40e5d892af0b07b351caf3a11644ff9b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include "oneflow/user/kernels/distributions/common.h"
namespace oneflow {
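// Fills `keys` with pseudo-random values (one draw per element, using state + i) and
// `values` with the indices 0..n-1; sorting the pairs by key afterwards yields a
// random permutation of the indices.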
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
hiprandState_t* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = hiprand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeGenerator(kGPU));
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<DistributionKernelState>(generator);
}
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
if (n == 0) { return; }
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* distribution_state = dynamic_cast<DistributionKernelState*>(state);
CHECK_NOTNULL(distribution_state);
const auto& generator = distribution_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
hiprandState_t* curand_states = gpu_generator->curand_states();
// layout for tmp |...key(in and out,2xN)..|....value....|.... space for sort function....|
    // values are the desired indices, and keys are generated randomly.
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
hipLaunchKernelGGL(( GeneKeysAndValues), dim3(block_num), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, value_base, key_base, curand_states);
auto err = hipcub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
| 98ae64b40e5d892af0b07b351caf3a11644ff9b1.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <curand.h>
#include <curand_kernel.h>
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include "oneflow/user/kernels/distributions/common.h"
namespace oneflow {
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
curandState* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = curand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeGenerator(kGPU));
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<DistributionKernelState>(generator);
}
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
if (n == 0) { return; }
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* distribution_state = dynamic_cast<DistributionKernelState*>(state);
CHECK_NOTNULL(distribution_state);
const auto& generator = distribution_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
curandState* curand_states = gpu_generator->curand_states();
// layout for tmp |...key(in and out,2xN)..|....value....|.... space for sort function....|
    // values are the desired indices, and keys are generated randomly.
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
GeneKeysAndValues<<<block_num, kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, value_base, key_base, curand_states);
auto err = cub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
|
37b915297928a62639ac2d094055bfa64507fa84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[40,51] --blockDim=[8,8]
#include "common.h"
__global__ void NLM(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
)
{
__requires(imageW == 320);
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix < imageW && iy < imageH)
{
//Normalized counter for the NLM weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
//Cycle through NLM window, surrounding (x, y) texel
for (float i = -NLM_WINDOW_RADIUS; i <= NLM_WINDOW_RADIUS; i++)
for (float j = -NLM_WINDOW_RADIUS; j <= NLM_WINDOW_RADIUS; j++)
{
//Find color distance from (x, y) to (x + j, y + i)
float weightIJ = 0;
for (float n = -NLM_BLOCK_RADIUS; n <= NLM_BLOCK_RADIUS; n++)
for (float m = -NLM_BLOCK_RADIUS; m <= NLM_BLOCK_RADIUS; m++)
weightIJ += vecLen(
tex2D(texImage, x + j + m, y + i + n),
tex2D(texImage, x + m, y + n)
);
//Derive final weight from color and geometric distance
weightIJ = __expf(-(weightIJ * Noise + (i * i + j * j) * INV_NLM_WINDOW_AREA));
//Accumulate (x + j, y + i) texel color with computed weight
float4 clrIJ = tex2D(texImage, x + j, y + i);
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if NLM weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > NLM_WEIGHT_THRESHOLD) ? INV_NLM_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
        //Choose LERP quotient based on how many texels
//within the NLM window exceeded the weight threshold
float lerpQ = (fCount > NLM_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
float4 clr00 = tex2D(texImage, x, y);
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
}
}
| 37b915297928a62639ac2d094055bfa64507fa84.cu | //pass
//--gridDim=[40,51] --blockDim=[8,8]
#include "common.h"
__global__ void NLM(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
)
{
__requires(imageW == 320);
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix < imageW && iy < imageH)
{
//Normalized counter for the NLM weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
//Cycle through NLM window, surrounding (x, y) texel
for (float i = -NLM_WINDOW_RADIUS; i <= NLM_WINDOW_RADIUS; i++)
for (float j = -NLM_WINDOW_RADIUS; j <= NLM_WINDOW_RADIUS; j++)
{
//Find color distance from (x, y) to (x + j, y + i)
float weightIJ = 0;
for (float n = -NLM_BLOCK_RADIUS; n <= NLM_BLOCK_RADIUS; n++)
for (float m = -NLM_BLOCK_RADIUS; m <= NLM_BLOCK_RADIUS; m++)
weightIJ += vecLen(
tex2D(texImage, x + j + m, y + i + n),
tex2D(texImage, x + m, y + n)
);
//Derive final weight from color and geometric distance
weightIJ = __expf(-(weightIJ * Noise + (i * i + j * j) * INV_NLM_WINDOW_AREA));
//Accumulate (x + j, y + i) texel color with computed weight
float4 clrIJ = tex2D(texImage, x + j, y + i);
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if NLM weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > NLM_WEIGHT_THRESHOLD) ? INV_NLM_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
        //Choose LERP quotient based on how many texels
//within the NLM window exceeded the weight threshold
float lerpQ = (fCount > NLM_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
float4 clr00 = tex2D(texImage, x, y);
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
}
}
|
abe29fe57e9e63670519d3b7c17a8e8e8a359363.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
return (a + b - 1) / b;
}
template <int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__ void zeroSharedMem(scalar_t* data) {
/*
Given an array of length FS + SB, zero out the first padding_l and last
(FS - padding_l) values in the array
*/
int tid = threadIdx.x;
if (FS < SB) {
// zero all if we have enough threads in a block to do all of them
if (tid < padding_l || tid > SB - FS + padding_l - 1) {
data[tid] = scalar_t(0.0);
}
} else {
// otherwise zero out one block at a time
const int numIterations = divUp<int, int>(FS, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if (tid + offset < padding_l) {
data[tid + offset] = scalar_t(0.0);
} else if (tid + offset < FS) {
data[SB + tid + offset] = scalar_t(0.0);
}
}
}
}
template <typename scalar_t>
__inline__ __device__ scalar_t warpReduce(scalar_t data) {
/*
  Reduce an array within each warp. After processing, all values in the warp will
  contain the sum of all original values in that warp.
data - pointer to data to reduce
*/
data += __shfl_xor_sync(SHFL_MASK, data, 16);
data += __shfl_xor_sync(SHFL_MASK, data, 8);
data += __shfl_xor_sync(SHFL_MASK, data, 4);
data += __shfl_xor_sync(SHFL_MASK, data, 2);
data += __shfl_xor_sync(SHFL_MASK, data, 1);
return data;
}
template <typename scalar_t>
__inline__ __device__ scalar_t blockReduce(scalar_t data) {
/*
Reduce an entire array on the block level. After processing, the
first value in the array will contain the reduced sum.
data - pointer to data to reduce
*/
static __shared__ scalar_t warpSum[32];
const int tid = threadIdx.x;
int wid = tid / 32;
int lane = tid % 32;
__syncthreads();
// reduce each warp then write to shared memory
scalar_t sum = warpReduce(data);
if (lane == 0) {
warpSum[wid] = sum;
}
__syncthreads();
scalar_t v;
// perform final sum of partial warp sums
if (tid < blockDim.x / 32) {
v = warpSum[lane];
} else {
v = scalar_t(0.0);
}
if (wid == 0) {
v = warpReduce(v);
}
__syncthreads();
return v;
}
void checkCudaStatus(hipError_t status, int lineNumber = -1) {
if (status != hipSuccess) {
std::cout << hipGetErrorString(status) << " at line " << lineNumber
<< std::endl;
std::cout << "Exiting" << std::endl;
exit(1);
}
}
template <int FS, int SB, int padding_l, typename scalar_t>
__device__ void load_input_to_shared(
const scalar_t* input, // global memory
int inputOffset,
int sequenceLength,
int iteration,
int numIterations,
bool no_prev,
scalar_t* output /* shared memory */) {
/*
Load a block size of input into shared memory with
  right and left overhang of total size FS. If memory was
  previously loaded, the overlap will be shifted over to reduce
  global memory accesses
input - pointer to start of channel sequence
inputOffset - how far in the sequence to start loading
sequenceLength - total length of sequence
iteration - which block of sequence we are loading
numIterations - total number of blocks to load
no_prev - whether to load the whole block if the previous block
wasn't loaded
output - shared memory to write input to
*/
const int tid = threadIdx.x;
// Load the left "overhang" of input
if (iteration > 0) {
if (padding_l < SB) {
// load all at once
if (tid < padding_l) {
output[tid] =
(no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < padding_l) {
output[tid + offset] = (no_prev)
? input[inputOffset - padding_l + tid + offset]
: output[tid + offset + SB];
}
}
}
}
// Load the right "overhang" of input
if (iteration < (numIterations - 1)) {
const int elementsLeft = sequenceLength - (iteration + 1) * SB;
if ((FS - padding_l) < SB) {
// load all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = (tid < elementsLeft)
? input[inputOffset + SB + tid]
: scalar_t(0.0);
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] =
((tid + offset) < elementsLeft)
? input[inputOffset + SB + tid + offset]
: scalar_t(0.0);
}
}
}
}
// We should also clear out the right "overhang"
if (iteration == (numIterations - 1)) {
if ((FS - padding_l) < SB) {
// clear out all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = scalar_t(0.0);
}
} else {
// clear in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = scalar_t(0.0);
}
}
}
}
output[tid + padding_l] = ((inputOffset + tid) < sequenceLength)
? input[inputOffset + tid]
: scalar_t(0.0);
}
| abe29fe57e9e63670519d3b7c17a8e8e8a359363.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
return (a + b - 1) / b;
}
template <int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__ void zeroSharedMem(scalar_t* data) {
/*
Given an array of length FS + SB, zero out the first padding_l and last
(FS - padding_l) values in the array
*/
int tid = threadIdx.x;
if (FS < SB) {
// zero all if we have enough threads in a block to do all of them
if (tid < padding_l || tid > SB - FS + padding_l - 1) {
data[tid] = scalar_t(0.0);
}
} else {
// otherwise zero out one block at a time
const int numIterations = divUp<int, int>(FS, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if (tid + offset < padding_l) {
data[tid + offset] = scalar_t(0.0);
} else if (tid + offset < FS) {
data[SB + tid + offset] = scalar_t(0.0);
}
}
}
}
template <typename scalar_t>
__inline__ __device__ scalar_t warpReduce(scalar_t data) {
/*
  Reduce an array within each warp. After processing, all values in the warp will
  contain the sum of all original values in that warp.
data - pointer to data to reduce
*/
data += __shfl_xor_sync(SHFL_MASK, data, 16);
data += __shfl_xor_sync(SHFL_MASK, data, 8);
data += __shfl_xor_sync(SHFL_MASK, data, 4);
data += __shfl_xor_sync(SHFL_MASK, data, 2);
data += __shfl_xor_sync(SHFL_MASK, data, 1);
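  // After the five XOR exchanges above (offsets 16, 8, 4, 2, 1) every lane
  // holds the sum of all 32 lanes' original values (a butterfly all-reduce),
  // assuming SHFL_MASK covers the full warp.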
return data;
}
template <typename scalar_t>
__inline__ __device__ scalar_t blockReduce(scalar_t data) {
/*
Reduce an entire array on the block level. After processing, the
first value in the array will contain the reduced sum.
data - pointer to data to reduce
*/
static __shared__ scalar_t warpSum[32];
const int tid = threadIdx.x;
int wid = tid / 32;
int lane = tid % 32;
__syncthreads();
// reduce each warp then write to shared memory
scalar_t sum = warpReduce(data);
if (lane == 0) {
warpSum[wid] = sum;
}
__syncthreads();
scalar_t v;
// perform final sum of partial warp sums
if (tid < blockDim.x / 32) {
v = warpSum[lane];
} else {
v = scalar_t(0.0);
}
if (wid == 0) {
v = warpReduce(v);
}
__syncthreads();
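  // Note: only warp 0 performed the final reduction above, so the block-wide
  // sum is only guaranteed in warp 0 (in particular thread 0); threads in
  // other warps return their local v unchanged.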
return v;
}
void checkCudaStatus(cudaError_t status, int lineNumber = -1) {
if (status != cudaSuccess) {
std::cout << cudaGetErrorString(status) << " at line " << lineNumber
<< std::endl;
std::cout << "Exiting" << std::endl;
exit(1);
}
}
template <int FS, int SB, int padding_l, typename scalar_t>
__device__ void load_input_to_shared(
const scalar_t* input, // global memory
int inputOffset,
int sequenceLength,
int iteration,
int numIterations,
bool no_prev,
scalar_t* output /* shared memory */) {
/*
Load a block size of input into shared memory with
  right and left overhang of total size FS. If memory was
  previously loaded, the overlap will be shifted over to reduce
  global memory accesses
input - pointer to start of channel sequence
inputOffset - how far in the sequence to start loading
sequenceLength - total length of sequence
iteration - which block of sequence we are loading
numIterations - total number of blocks to load
no_prev - whether to load the whole block if the previous block
wasn't loaded
output - shared memory to write input to
*/
const int tid = threadIdx.x;
// Load the left "overhang" of input
if (iteration > 0) {
if (padding_l < SB) {
// load all at once
if (tid < padding_l) {
output[tid] =
(no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < padding_l) {
output[tid + offset] = (no_prev)
? input[inputOffset - padding_l + tid + offset]
: output[tid + offset + SB];
}
}
}
}
// Load the right "overhang" of input
if (iteration < (numIterations - 1)) {
const int elementsLeft = sequenceLength - (iteration + 1) * SB;
if ((FS - padding_l) < SB) {
// load all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = (tid < elementsLeft)
? input[inputOffset + SB + tid]
: scalar_t(0.0);
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] =
((tid + offset) < elementsLeft)
? input[inputOffset + SB + tid + offset]
: scalar_t(0.0);
}
}
}
}
// We should also clear out the right "overhang"
if (iteration == (numIterations - 1)) {
if ((FS - padding_l) < SB) {
// clear out all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = scalar_t(0.0);
}
} else {
// clear in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = scalar_t(0.0);
}
}
}
}
output[tid + padding_l] = ((inputOffset + tid) < sequenceLength)
? input[inputOffset + tid]
: scalar_t(0.0);
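  // Resulting shared-memory layout (illustrative): output[0 .. padding_l-1]
  // holds the left overhang, output[padding_l .. padding_l+SB-1] the current
  // block, and output[padding_l+SB .. FS+SB-1] the right overhang; e.g. with
  // FS = 3, padding_l = 1, SB = 32 the array spans 3 + 32 = 35 elements.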
}
|
215a5d3cf5a777c1c45932134ae8921e0401094c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AssignRCut.h"
#include "BoxGeometry.h"
AssignRCut::
AssignRCut ()
: malloced (false)
{
}
AssignRCut::
~AssignRCut ()
{
freeAll();
}
void AssignRCut::
reinit (const MDSystem & sys,
const AdaptRCut & arc,
const IndexType & NThread)
{
freeAll ();
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NThread;
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
nx = arc.getNx();
ny = arc.getNy();
nz = arc.getNz();
nele = nx * ny * nz;
box = sys.box;
hrcut = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hipMalloc ((void **) &drcut, sizeof(ScalorType ) * nele);
checkCUDAError ("AssignRCut::reinit malloc drcut");
malloced = true;
}
void AssignRCut::
getRCut (const AdaptRCut & arc)
{
for (int i = 0; i < nele; ++i){
hrcut[i] = arc.getRCut()[i];
}
hipMemcpy (drcut, hrcut, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
checkCUDAError ("AssignRCut::getRCut copy");
}
void AssignRCut::
uniform (const double & rc)
{
for (int i = 0; i < nele; ++i){
hrcut[i] = rc;
}
hipMemcpy (drcut, hrcut, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
checkCUDAError ("AssignRCut::getRCut copy");
}
void AssignRCut::
freeAll ()
{
if (malloced) {
hipFree (drcut);
free (hrcut);
malloced = false;
}
}
using namespace RectangularBoxGeometry;
static void __global__
assignRCutToSystem (const ScalorType * rcutLattice,
const int nx,
const int ny,
const int nz,
const RectangularBox box,
const CoordType * coord,
const int numAtom,
ScalorType * rcut)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
CoordType mycoord = coord[ii];
if (mycoord.x < 0) mycoord.x += box.size.x;
else if (mycoord.x >= box.size.x) mycoord.x -= box.size.x;
if (mycoord.y < 0) mycoord.y += box.size.y;
else if (mycoord.y >= box.size.y) mycoord.y -= box.size.y;
if (mycoord.z < 0) mycoord.z += box.size.z;
else if (mycoord.z >= box.size.z) mycoord.z -= box.size.z;
int ix = (mycoord.x * nx) / box.size.x;
int iy = (mycoord.y * ny) / box.size.y;
int iz = (mycoord.z * nz) / box.size.z;
int idx = iz + nz * (iy + ny * ix);
rcut[ii] = rcutLattice[idx];
}
}
void AssignRCut::
assign (MDSystem & sys)
{
hipLaunchKernelGGL(( assignRCutToSystem)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
drcut,
nx, ny, nz,
sys.box,
sys.ddata.coord,
sys.ddata.numAtom,
sys.ddata.rcut);
}
void AssignRCut::
print_x (const char * file) const
{
FILE * fp = fopen (file, "w");
if (fp == NULL){
std::cerr << "cannot open file " << file << std::endl;
exit(1);
}
for (int i = 0; i < nx; ++i){
// double sum = 0.;
// for (int j = 0; j < ny; ++j){
// for (int k = 0; k < nz; ++k){
// sum += profile[index3to1(i, j, k)];
// }
// }
fprintf (fp, "%f %e\n",
(i + 0.5) * box.size.x / double(nx),
hrcut [index3to1(i, 0, 0)]
);
}
fclose (fp);
}
void AssignRCut::
init_write (const char * file) const
{
fp_write = fopen (file, "w");
if (fp_write == NULL){
fprintf (stderr, "cannot open file %s\n", file);
exit(1);
}
double tmpbox[3];
tmpbox[0] = box.size.x;
tmpbox[1] = box.size.y;
tmpbox[2] = box.size.z;
int tmpnn[3];
tmpnn[0] = nx;
tmpnn[1] = ny;
tmpnn[2] = nz;
fwrite (tmpbox, sizeof(double), 3, fp_write);
fwrite (tmpnn, sizeof(int), 3, fp_write);
}
void AssignRCut::
end_write () const
{
fclose (fp_write);
}
void AssignRCut::
write (const ScalorType & time) const
{
ScalorType tmptime = time;
fwrite (&tmptime, sizeof(ScalorType), 1, fp_write);
fwrite (hrcut, sizeof(ScalorType), nele, fp_write);
}
| 215a5d3cf5a777c1c45932134ae8921e0401094c.cu | #include "AssignRCut.h"
#include "BoxGeometry.h"
AssignRCut::
AssignRCut ()
: malloced (false)
{
}
AssignRCut::
~AssignRCut ()
{
freeAll();
}
void AssignRCut::
reinit (const MDSystem & sys,
const AdaptRCut & arc,
const IndexType & NThread)
{
freeAll ();
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NThread;
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
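  // For example (values assumed for illustration): numAtom = 1000 and
  // NThread = 128 give 1000 % 128 != 0, so nob = 1000 / 128 + 1 = 8 blocks.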
nx = arc.getNx();
ny = arc.getNy();
nz = arc.getNz();
nele = nx * ny * nz;
box = sys.box;
hrcut = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
cudaMalloc ((void **) &drcut, sizeof(ScalorType ) * nele);
checkCUDAError ("AssignRCut::reinit malloc drcut");
malloced = true;
}
void AssignRCut::
getRCut (const AdaptRCut & arc)
{
for (int i = 0; i < nele; ++i){
hrcut[i] = arc.getRCut()[i];
}
cudaMemcpy (drcut, hrcut, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
checkCUDAError ("AssignRCut::getRCut copy");
}
void AssignRCut::
uniform (const double & rc)
{
for (int i = 0; i < nele; ++i){
hrcut[i] = rc;
}
cudaMemcpy (drcut, hrcut, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
checkCUDAError ("AssignRCut::getRCut copy");
}
void AssignRCut::
freeAll ()
{
if (malloced) {
cudaFree (drcut);
free (hrcut);
malloced = false;
}
}
using namespace RectangularBoxGeometry;
static void __global__
assignRCutToSystem (const ScalorType * rcutLattice,
const int nx,
const int ny,
const int nz,
const RectangularBox box,
const CoordType * coord,
const int numAtom,
ScalorType * rcut)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
CoordType mycoord = coord[ii];
if (mycoord.x < 0) mycoord.x += box.size.x;
else if (mycoord.x >= box.size.x) mycoord.x -= box.size.x;
if (mycoord.y < 0) mycoord.y += box.size.y;
else if (mycoord.y >= box.size.y) mycoord.y -= box.size.y;
if (mycoord.z < 0) mycoord.z += box.size.z;
else if (mycoord.z >= box.size.z) mycoord.z -= box.size.z;
int ix = (mycoord.x * nx) / box.size.x;
int iy = (mycoord.y * ny) / box.size.y;
int iz = (mycoord.z * nz) / box.size.z;
int idx = iz + nz * (iy + ny * ix);
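    // idx uses z as the fastest-varying index; for example, with assumed
    // lattice sizes nx = ny = nz = 4 and (ix, iy, iz) = (1, 2, 3),
    // idx = 3 + 4 * (2 + 4 * 1) = 27.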
rcut[ii] = rcutLattice[idx];
}
}
void AssignRCut::
assign (MDSystem & sys)
{
assignRCutToSystem
<<<atomGridDim, myBlockDim>>> (
drcut,
nx, ny, nz,
sys.box,
sys.ddata.coord,
sys.ddata.numAtom,
sys.ddata.rcut);
}
void AssignRCut::
print_x (const char * file) const
{
FILE * fp = fopen (file, "w");
if (fp == NULL){
std::cerr << "cannot open file " << file << std::endl;
exit(1);
}
for (int i = 0; i < nx; ++i){
// double sum = 0.;
// for (int j = 0; j < ny; ++j){
// for (int k = 0; k < nz; ++k){
// sum += profile[index3to1(i, j, k)];
// }
// }
fprintf (fp, "%f %e\n",
(i + 0.5) * box.size.x / double(nx),
hrcut [index3to1(i, 0, 0)]
);
}
fclose (fp);
}
void AssignRCut::
init_write (const char * file) const
{
fp_write = fopen (file, "w");
if (fp_write == NULL){
fprintf (stderr, "cannot open file %s\n", file);
exit(1);
}
double tmpbox[3];
tmpbox[0] = box.size.x;
tmpbox[1] = box.size.y;
tmpbox[2] = box.size.z;
int tmpnn[3];
tmpnn[0] = nx;
tmpnn[1] = ny;
tmpnn[2] = nz;
fwrite (tmpbox, sizeof(double), 3, fp_write);
fwrite (tmpnn, sizeof(int), 3, fp_write);
}
void AssignRCut::
end_write () const
{
fclose (fp_write);
}
void AssignRCut::
write (const ScalorType & time) const
{
ScalorType tmptime = time;
fwrite (&tmptime, sizeof(ScalorType), 1, fp_write);
fwrite (hrcut, sizeof(ScalorType), nele, fp_write);
}
|
cf37449dfb1fc93294c610cb36c1b274ea69ea53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DataLayerSpiking.h"
#include "opencv2/opencv.hpp"
#include <vector>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <math.h>
//#include <thread>
#include "../common/Config.h"
#include "../common/cuBase.h"
#include "../common/util.h"
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * endTime, 1024));
*/
__global__ void g_dataLayer_spiking_feedforward(
bool** inputs,
bool* outputs,
int outputArea,
int outputCols);
DataLayerSpiking::DataLayerSpiking(std::string name){
m_name = name;
myId = 0;
ConfigDataSpiking* config = (ConfigDataSpiking*)Config::instance()->getLayerByName(m_name);
inputDim = config->m_inputNeurons;
outputDim = inputDim;
endTime = Config::instance()->getEndTime();
batch = Config::instance()->getBatchSize();
inputAmount = Config::instance()->getChannels();
outputAmount= inputAmount;
outputs = new cuMatrix<bool>(batch, outputDim * endTime, outputAmount);
outputs_time = new cuMatrix<int>(batch, outputDim * endTime, outputAmount);
fireCount = new cuMatrix<int>(batch, outputDim, outputAmount);
for(int i = 0; i < 2; ++i){
for(int j = 0; j < batch; j++){
batchSpeeches[i].push_back(new cuMatrix<bool>(endTime, inputDim, Config::instance()->getChannels()));
}
batchSpeeches[i].toGpu();
}
checkCudaErrors(hipStreamCreate(&stream1));
Layers::instance()->set(m_name, this);
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * endTime, 1024));
*/
__global__ void g_dataLayer_spiking_feedforward(
bool** inputs,
bool* outputs,
int outputArea,
int outputCols)
{
int batchId = blockIdx.x;
int ok = blockIdx.y;
int outputAmount = gridDim.y;
bool* input = inputs[batchId];
bool* output = outputs + ok * outputArea+ batchId * outputCols * outputAmount;
for(int i = 0; i < outputCols; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < outputCols){
output[idx] = input[idx];
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(outputDim, 1024));
*/
__global__ void g_dataLayer_get_fireCount(
bool* outputs,
int* batchfireCount,
int outputDim,
int endTime)
{
int batchId = blockIdx.x;
bool* output = outputs + batchId * endTime * outputDim;
int* fireCount = batchfireCount + batchId * outputDim;
for(int i = 0; i < outputDim; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
int sum = 0;
for(int time = 0; time < endTime; ++time) sum += output[o_idx + time * outputDim];
fireCount[o_idx] = sum;
}
}
//* simply copy the input data to the output
void DataLayerSpiking::feedforward(){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * endTime, 1024));
hipLaunchKernelGGL(( g_dataLayer_spiking_feedforward), dim3(block), dim3(thread), 0, 0,
batchSpeeches[myId].m_devPoint,
outputs->getDev(),
outputs->getArea(),
outputs->cols);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:feedforward");
//* get the fire counts for transforming the binary response to spike times
thread = dim3(min(outputDim, 1024));
hipLaunchKernelGGL(( g_dataLayer_get_fireCount), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
fireCount->getDev(),
outputDim,
endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:g_dataLayer_get_fireCount");
hipLaunchKernelGGL(( g_response_2_spiketime), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs_time->getDev(),
outputDim,
endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:g_response_2_spiketime");
};
void DataLayerSpiking::trainData()
{
}
void DataLayerSpiking::testData()
{
}
void DataLayerSpiking::synchronize(){
myId = 1 - myId;
hipStreamSynchronize(this->stream1);
}
//* get the input spike trains in batch from the input speech streams
void DataLayerSpiking::getBatchSpikesWithStreams(cuMatrixVector<bool>& inputs, int start){
int id = 1 - this->myId;
for(size_t i = 0; i < this->batchSpeeches[id].size(); i++){
inputs[i+start]->sparseToDense();
memcpy(this->batchSpeeches[id][i]->getHost(), inputs[i + start]->getHost(), sizeof(bool) * this->batchSpeeches[id][i]->getLen());
this->batchSpeeches[id][i]->toGpu(this->stream1);
inputs[i+start]->freeCpuMem();
//this->batchSpeeches[i]->toGpu();
}
}
| cf37449dfb1fc93294c610cb36c1b274ea69ea53.cu | #include "DataLayerSpiking.h"
#include "opencv2/opencv.hpp"
#include <vector>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <math.h>
//#include <thread>
#include "../common/Config.h"
#include "../common/cuBase.h"
#include "../common/util.h"
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * endTime, 1024));
*/
__global__ void g_dataLayer_spiking_feedforward(
bool** inputs,
bool* outputs,
int outputArea,
int outputCols);
DataLayerSpiking::DataLayerSpiking(std::string name){
m_name = name;
myId = 0;
ConfigDataSpiking* config = (ConfigDataSpiking*)Config::instance()->getLayerByName(m_name);
inputDim = config->m_inputNeurons;
outputDim = inputDim;
endTime = Config::instance()->getEndTime();
batch = Config::instance()->getBatchSize();
inputAmount = Config::instance()->getChannels();
outputAmount= inputAmount;
outputs = new cuMatrix<bool>(batch, outputDim * endTime, outputAmount);
outputs_time = new cuMatrix<int>(batch, outputDim * endTime, outputAmount);
fireCount = new cuMatrix<int>(batch, outputDim, outputAmount);
for(int i = 0; i < 2; ++i){
for(int j = 0; j < batch; j++){
batchSpeeches[i].push_back(new cuMatrix<bool>(endTime, inputDim, Config::instance()->getChannels()));
}
batchSpeeches[i].toGpu();
}
checkCudaErrors(cudaStreamCreate(&stream1));
Layers::instance()->set(m_name, this);
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * endTime, 1024));
*/
__global__ void g_dataLayer_spiking_feedforward(
bool** inputs,
bool* outputs,
int outputArea,
int outputCols)
{
int batchId = blockIdx.x;
int ok = blockIdx.y;
int outputAmount = gridDim.y;
bool* input = inputs[batchId];
bool* output = outputs + ok * outputArea+ batchId * outputCols * outputAmount;
for(int i = 0; i < outputCols; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < outputCols){
output[idx] = input[idx];
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(outputDim, 1024));
*/
__global__ void g_dataLayer_get_fireCount(
bool* outputs,
int* batchfireCount,
int outputDim,
int endTime)
{
int batchId = blockIdx.x;
bool* output = outputs + batchId * endTime * outputDim;
int* fireCount = batchfireCount + batchId * outputDim;
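    // output is laid out time-major: element (time, neuron) lives at
    // output[time * outputDim + neuron], so the inner loop below walks one
    // neuron's column across all endTime steps and counts its spikes.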
for(int i = 0; i < outputDim; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
int sum = 0;
for(int time = 0; time < endTime; ++time) sum += output[o_idx + time * outputDim];
fireCount[o_idx] = sum;
}
}
//* simply copy the input data to the output
void DataLayerSpiking::feedforward(){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * endTime, 1024));
g_dataLayer_spiking_feedforward<<<block, thread>>>(
batchSpeeches[myId].m_devPoint,
outputs->getDev(),
outputs->getArea(),
outputs->cols);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:feedforward");
//* get the fire counts for transforming the binary response to spike times
thread = dim3(min(outputDim, 1024));
g_dataLayer_get_fireCount<<<block, thread>>>(
outputs->getDev(),
fireCount->getDev(),
outputDim,
endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:g_dataLayer_get_fireCount");
g_response_2_spiketime<<<block, thread>>>(
outputs->getDev(),
outputs_time->getDev(),
outputDim,
endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("DataLayerSpiking:g_response_2_spiketime");
};
void DataLayerSpiking::trainData()
{
}
void DataLayerSpiking::testData()
{
}
void DataLayerSpiking::synchronize(){
myId = 1 - myId;
cudaStreamSynchronize(this->stream1);
}
//* get the input spike trains in batch from the input speech streams
void DataLayerSpiking::getBatchSpikesWithStreams(cuMatrixVector<bool>& inputs, int start){
int id = 1 - this->myId;
for(size_t i = 0; i < this->batchSpeeches[id].size(); i++){
inputs[i+start]->sparseToDense();
memcpy(this->batchSpeeches[id][i]->getHost(), inputs[i + start]->getHost(), sizeof(bool) * this->batchSpeeches[id][i]->getLen());
this->batchSpeeches[id][i]->toGpu(this->stream1);
inputs[i+start]->freeCpuMem();
//this->batchSpeeches[i]->toGpu();
}
}
|
425b549694ebfcd02a08f999b7e7a5779133bc50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "asvgf/asvgf.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#if 0
__device__ inline bool isEqualInt2(const int2& a, const int2& b)
{
return (a.x == b.x) && (a.y == b.y);
}
__global__ void createGradient(
idaten::TileDomain tileDomain,
int tileSize,
float4* outGradient,
const idaten::SVGFPathTracing::Path* __restrict__ paths,
const float4* __restrict__ prevAovColorUnfiltered,
const float4* __restrict__ curAovTexclrMeshid,
const int4* __restrict__ gradientSample,
int width, int height,
int widthInRealRes, int heightInRealRes)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= tileDomain.w || iy >= tileDomain.h) {
return;
}
ix += tileDomain.x;
iy += tileDomain.y;
const int idx = getIdx(ix, iy, width);
    // Get position in a tile.
int2 tilePos = make_int2(gradientSample[idx].x, gradientSample[idx].y);
    // Convert tile position to real resolution position.
int2 posInRealRes = make_int2(ix, iy) * tileSize + tilePos;
posInRealRes.x = aten::clamp(posInRealRes.x, 0, widthInRealRes - 1);
posInRealRes.y = aten::clamp(posInRealRes.y, 0, heightInRealRes - 1);
const int curIdxInRealRes = getIdx(posInRealRes.x, posInRealRes.y, widthInRealRes);
auto curColor = paths->contrib[curIdxInRealRes].v;
float curLum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z);
outGradient[idx] = make_float4(0.0f);
// Get previous frame index in real resolution.
const int prevIdxInRealRes = gradientSample[idx].z;
    // Only if the previous sample position is inside the image resolution.
if (prevIdxInRealRes >= 0) {
auto prevColor = prevAovColorUnfiltered[prevIdxInRealRes];
float prevLum = AT_NAME::color::luminance(prevColor.x, prevColor.y, prevColor.z);
outGradient[idx].x = max(curLum, prevLum);
outGradient[idx].y = curLum - prevLum;
}
float2 moments = make_float2(curLum, curLum * curLum);
float sumW = 1.0f;
int centerMeshId = (int)curAovTexclrMeshid[curIdxInRealRes].w;
// Compute moment and variance in a tile.
for (int yy = 0; yy < tileSize; yy++) {
for (int xx = 0; xx < tileSize; xx++) {
int2 p = make_int2(ix, iy) * tileSize + make_int2(xx, yy);
p.x = aten::clamp(p.x, 0, widthInRealRes - 1);
p.y = aten::clamp(p.y, 0, heightInRealRes - 1);
if (!isEqualInt2(posInRealRes, p)) {
int pidx = getIdx(p.x, p.y, widthInRealRes);
auto clr = paths->contrib[pidx].v;
int meshId = (int)curAovTexclrMeshid[pidx].w;
float l = AT_NAME::color::luminance(clr.x, clr.y, clr.z);
float w = (centerMeshId == meshId ? 1.0f : 0.0f);
moments += make_float2(l, l * l) * w;
sumW += w;
}
}
}
moments /= sumW;
float variance = max(0.0f, moments.y - moments.x * moments.x);
outGradient[idx].z = moments.x;
outGradient[idx].w = variance;
}
namespace idaten
{
void AdvancedSVGFPathTracing::onCreateGradient(int width, int height)
{
// TODO
    // Split (tiled) rendering.
int tiledW = getTiledResolution(width);
int tiledH = getTiledResolution(height);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(tiledW + block.x - 1) / block.x,
(tiledH + block.y - 1) / block.y);
float cameraDistance = height / (2.0f * aten::tan(0.5f * m_camParam.vfov));
int curaov = getCurAovs();
int prevaov = getPrevAovs();
createGradient << <grid, block >> > (
m_tileDomain,
m_gradientTileSize,
m_gradient.ptr(),
m_paths.ptr(),
m_aovColorVariance[prevaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
m_gradientSample.ptr(),
tiledW, tiledH,
width, height);
checkCudaKernel(createGradient);
}
}
#endif | 425b549694ebfcd02a08f999b7e7a5779133bc50.cu | #include "asvgf/asvgf.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#if 0
__device__ inline bool isEqualInt2(const int2& a, const int2& b)
{
return (a.x == b.x) && (a.y == b.y);
}
__global__ void createGradient(
idaten::TileDomain tileDomain,
int tileSize,
float4* outGradient,
const idaten::SVGFPathTracing::Path* __restrict__ paths,
const float4* __restrict__ prevAovColorUnfiltered,
const float4* __restrict__ curAovTexclrMeshid,
const int4* __restrict__ gradientSample,
int width, int height,
int widthInRealRes, int heightInRealRes)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= tileDomain.w || iy >= tileDomain.h) {
return;
}
ix += tileDomain.x;
iy += tileDomain.y;
const int idx = getIdx(ix, iy, width);
    // Get position in a tile.
int2 tilePos = make_int2(gradientSample[idx].x, gradientSample[idx].y);
    // Convert tile position to real resolution position.
int2 posInRealRes = make_int2(ix, iy) * tileSize + tilePos;
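    // For example (assumed values): with tileSize = 3, (ix, iy) = (10, 4) and
    // tilePos = (1, 2), posInRealRes = (10 * 3 + 1, 4 * 3 + 2) = (31, 14)
    // before clamping to the real-resolution bounds below.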
posInRealRes.x = aten::clamp(posInRealRes.x, 0, widthInRealRes - 1);
posInRealRes.y = aten::clamp(posInRealRes.y, 0, heightInRealRes - 1);
const int curIdxInRealRes = getIdx(posInRealRes.x, posInRealRes.y, widthInRealRes);
auto curColor = paths->contrib[curIdxInRealRes].v;
float curLum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z);
outGradient[idx] = make_float4(0.0f);
// Get previous frame index in real resolution.
const int prevIdxInRealRes = gradientSample[idx].z;
    // Only if the previous sample position is inside the image resolution.
if (prevIdxInRealRes >= 0) {
auto prevColor = prevAovColorUnfiltered[prevIdxInRealRes];
float prevLum = AT_NAME::color::luminance(prevColor.x, prevColor.y, prevColor.z);
outGradient[idx].x = max(curLum, prevLum);
outGradient[idx].y = curLum - prevLum;
}
float2 moments = make_float2(curLum, curLum * curLum);
float sumW = 1.0f;
int centerMeshId = (int)curAovTexclrMeshid[curIdxInRealRes].w;
// Compute moment and variance in a tile.
for (int yy = 0; yy < tileSize; yy++) {
for (int xx = 0; xx < tileSize; xx++) {
int2 p = make_int2(ix, iy) * tileSize + make_int2(xx, yy);
p.x = aten::clamp(p.x, 0, widthInRealRes - 1);
p.y = aten::clamp(p.y, 0, heightInRealRes - 1);
if (!isEqualInt2(posInRealRes, p)) {
int pidx = getIdx(p.x, p.y, widthInRealRes);
auto clr = paths->contrib[pidx].v;
int meshId = (int)curAovTexclrMeshid[pidx].w;
float l = AT_NAME::color::luminance(clr.x, clr.y, clr.z);
float w = (centerMeshId == meshId ? 1.0f : 0.0f);
moments += make_float2(l, l * l) * w;
sumW += w;
}
}
}
moments /= sumW;
float variance = max(0.0f, moments.y - moments.x * moments.x);
outGradient[idx].z = moments.x;
outGradient[idx].w = variance;
}
namespace idaten
{
void AdvancedSVGFPathTracing::onCreateGradient(int width, int height)
{
// TODO
    // Split (tiled) rendering.
int tiledW = getTiledResolution(width);
int tiledH = getTiledResolution(height);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(tiledW + block.x - 1) / block.x,
(tiledH + block.y - 1) / block.y);
float cameraDistance = height / (2.0f * aten::tan(0.5f * m_camParam.vfov));
int curaov = getCurAovs();
int prevaov = getPrevAovs();
createGradient << <grid, block >> > (
m_tileDomain,
m_gradientTileSize,
m_gradient.ptr(),
m_paths.ptr(),
m_aovColorVariance[prevaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
m_gradientSample.ptr(),
tiledW, tiledH,
width, height);
checkCudaKernel(createGradient);
}
}
#endif |
63ca046bff415aa5924232d7ef56557e903730f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixNorm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
float *d_mean = NULL;
hipMalloc(&d_mean, XSIZE*YSIZE);
float *d_sd = NULL;
hipMalloc(&d_sd, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_mean,d_sd,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_mean,d_sd,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_mean,d_sd,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 63ca046bff415aa5924232d7ef56557e903730f8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixNorm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
float *d_mean = NULL;
cudaMalloc(&d_mean, XSIZE*YSIZE);
float *d_sd = NULL;
cudaMalloc(&d_sd, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixNorm<<<gridBlock,threadBlock>>>(d_in,d_out,d_mean,d_sd,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixNorm<<<gridBlock,threadBlock>>>(d_in,d_out,d_mean,d_sd,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixNorm<<<gridBlock,threadBlock>>>(d_in,d_out,d_mean,d_sd,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
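  // Note: kernel launches are asynchronous and there is no device synchronization
  // before 'end', so usecs does not necessarily cover the full execution time of
  // all 1000 launches; the average per launch would be usecs.count() / 1000.0f.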
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d7634eab56b9691776b75ef2701103cc772e52e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CUDA kernels for the feature tracker
* feature_tracker_cuda_tools.cu
*
* Copyright (c) 2019-2020 Balazs Nagy,
* Robotics and Perception Group, University of Zurich
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vilib/feature_tracker/feature_tracker_cuda_tools.h"
#include "vilib/feature_tracker/config.h"
#include "vilib/cuda_common.h"
namespace vilib {
namespace feature_tracker_cuda_tools {
// Warp preliminaries
#define WARP_SIZE 32
#define WARP_MASK 0xFFFFFFFF
// Precalculating feature patches
#define CANDIDATES_PER_BLOCK_UPDATE 3
/*
* Note to future self:
* - interestingly with reference patch interpolation, the tracking performance
* degrades. -> DISABLE
* - Int/Float does not seem to affect the tracking performance, but the float
* version is slower. -> INT
*/
#define REFERENCE_PATCH_INTERPOLATION 0
#define CANDIDATES_PER_BLOCK_TRACK 2
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__device__ __inline__ void perform_lk(const float & min_update_squared,
const int & img_width,
const int & img_height,
const int & img_pitch,
const unsigned char * __restrict__ d_in_cur_img,
const int & patch_size,
const int & half_patch_size,
const int & patch_stride,
const T * ref_patch,
const float * invH,
float2 & cur_px,
float2 & cur_alpha_beta,
bool & converged,
bool & go_to_next_level) {
converged = false;
go_to_next_level = false;
// Reference patch & actual image
const int patch_area = patch_size * patch_size;
const int x = threadIdx.x % patch_size;
const int y = threadIdx.x / patch_size;
const T * it_ref_start = ref_patch + (y+1)*patch_stride + (x+1);
const int it_ref_offset = patch_stride*WARP_SIZE/patch_size;
const unsigned char * it_start = (const unsigned char*) d_in_cur_img -
(img_pitch + 1)*half_patch_size + threadIdx.x +
(img_pitch - patch_size)*(threadIdx.x/patch_size);
const int it_offset = img_pitch*WARP_SIZE/patch_size;
const int pixels_per_thread = patch_area/WARP_SIZE;
#pragma unroll
for(int iter=0; iter<FEATURE_TRACKER_MAX_ITERATION_COUNT;++iter) {
if(isnan(cur_px.x) || isnan(cur_px.y)) {
break;
}
int u_r = floorf(cur_px.x);
int v_r = floorf(cur_px.y);
if(u_r < half_patch_size ||
v_r < half_patch_size ||
u_r >= (img_width-half_patch_size) ||
v_r >= (img_height-half_patch_size)) {
// don't change the state 'converged'
go_to_next_level = true;
break;
}
// compute interpolation weights
float subpix_x = cur_px.x-u_r;
float subpix_y = cur_px.y-v_r;
float wTL = (1.0f-subpix_x)*(1.0f-subpix_y);
float wTR = subpix_x * (1.0f-subpix_y);
float wBL = (1.0f-subpix_x)*subpix_y;
float wBR = subpix_x * subpix_y;
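      // The four bilinear weights sum to 1; e.g. subpix_x = 0.25f, subpix_y = 0.5f
      // gives wTL = 0.375f, wTR = 0.125f, wBL = 0.375f, wBR = 0.125f.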
float Jres[4];
#pragma unroll
for(int i=0;i<4;++i) {
Jres[i] = 0.0f;
}
const uint8_t * it = it_start + u_r + v_r*img_pitch;
const T * it_ref = it_ref_start;
// Note: every thread computes (PATCH_SIZE*PATCH_SIZE/WARP_SIZE) pixels
#pragma unroll
for(int i=0;i<pixels_per_thread;++i,it+=it_offset,it_ref+=it_ref_offset) {
// Note it cannot be read as uchar2, because it would require proper alignment
float search_pixel = wTL*it[0] + wTR*it[1] + wBL*it[img_pitch] + wBR*it[img_pitch+1];
float res = search_pixel - (1.0f+cur_alpha_beta.x)*(*it_ref) - cur_alpha_beta.y;
Jres[0] += res * 0.5f * (it_ref[1] - it_ref[-1]);
Jres[1] += res * 0.5f * (it_ref[patch_stride] - it_ref[-patch_stride]);
// If affine compensation is used,
// set Jres with respect to affine parameters.
if(affine_est_offset && affine_est_gain) {
Jres[2] += res;
Jres[3] += res*(*it_ref);
} else if(affine_est_offset) {
Jres[2] += res;
} else if(affine_est_gain) {
Jres[2] += res*(*it_ref);
}
}
// Reduce it to all lanes
#pragma unroll
for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) {
#pragma unroll
for(int i=0;i<4;++i) {
Jres[i] += __shfl_xor_sync(WARP_MASK, Jres[i], offset);
}
}
//update = Hinv * Jres
//broadcast the computed values in the warp from lane 0
float update[4];
if(affine_est_offset && affine_est_gain) {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1] + invH[2] * Jres[2] + invH[3] * Jres[3];
update[1] = invH[1] * Jres[0] + invH[4] * Jres[1] + invH[5] * Jres[2] + invH[6] * Jres[3];
update[2] = invH[2] * Jres[0] + invH[5] * Jres[1] + invH[7] * Jres[2] + invH[8] * Jres[3];
update[3] = invH[3] * Jres[0] + invH[6] * Jres[1] + invH[8] * Jres[2] + invH[9] * Jres[3];
} else if(affine_est_gain || affine_est_offset) {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1] + invH[2] * Jres[2];
update[1] = invH[1] * Jres[0] + invH[3] * Jres[1] + invH[4] * Jres[2];
update[2] = invH[2] * Jres[0] + invH[4] * Jres[1] + invH[5] * Jres[2];
} else {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1];
update[1] = invH[1] * Jres[0] + invH[2] * Jres[1];
}
// Updating our estimates
// Translational displacement
cur_px.x -= update[0];
cur_px.y -= update[1];
// Illumination changes
if(affine_est_offset && affine_est_gain) {
cur_alpha_beta.x += update[3];
cur_alpha_beta.y += update[2];
} else if (affine_est_offset) {
cur_alpha_beta.y += update[2];
} else if(affine_est_gain) {
cur_alpha_beta.x += update[2];
}
if(update[0]*update[0]+update[1]*update[1] < min_update_squared) {
converged=true;
break;
}
}
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__global__ void track_features_kernel(const int candidate_num,
const int min_level,
const int max_level,
const float min_update_squared,
const image_pyramid_descriptor_t pyramid_description,
const pyramid_patch_descriptor_t pyramid_patch_sizes,
const int * __restrict__ d_indir_data,
const T * __restrict__ d_patch_data,
const float * __restrict__ d_hessian_data,
const float2 * __restrict__ d_first_px,
float2 * __restrict__ d_cur_px,
float2 * __restrict__ d_cur_alpha_beta,
float4 * __restrict__ d_cur_f,
float * __restrict__ d_cur_disparity) {
const int cx = blockIdx.x * CANDIDATES_PER_BLOCK_TRACK + threadIdx.y; // candidate id
const int pyramid_levels = max_level - min_level + 1; // number of pyramid levels computed
if(cx < candidate_num) {
// Acquire buffer id for the candidate
const int bx = d_indir_data[cx];
// Initialize input and output references
// Remark: struct size: 64 bytes
// Tracking
float2 & d_cur_px_bx = d_cur_px[bx<<3]; // level 0
float2 & d_cur_alpha_beta_bx = d_cur_alpha_beta[bx<<3];
// Bearing vector
float4 & d_cur_f_bx = d_cur_f[bx<<2];
// Disparity calculation
const float2 & d_first_px_bx = d_first_px[bx<<3];
float & d_cur_disparity_bx = d_cur_disparity[bx<<4];
// Patch data
const T * d_patch_data_bx = d_patch_data + pyramid_patch_sizes.max_area*pyramid_levels*bx; // points to max_level
// Hessian data
const float * d_hessian_data_bx = d_hessian_data + 10*pyramid_levels*bx; // points to max_level
/*
* Iterate through all the selected levels,
* and refine the current patch location
*/
bool converged = false;
bool go_to_next_level = true;
float2 cur_px = d_cur_px_bx;
float2 cur_alpha_beta = d_cur_alpha_beta_bx;
float scale = 1.0f;
for(int level=max_level;
(converged || go_to_next_level) && level>=min_level;
cur_px.x *= scale,cur_px.y *= scale,--level,d_patch_data_bx+=pyramid_patch_sizes.max_area,d_hessian_data_bx+=10) {
// scale & patch size
scale = (float)(1<<level);
const float inv_scale = 1.0f/scale;
const int & patch_size = pyramid_patch_sizes.wh[level];
const int & half_patch_size = patch_size >> 1;
const int & patch_stride = patch_size + 2;
// image size
const int & d_in_img_width = pyramid_description.desc.w[level];
const int & d_in_img_height = pyramid_description.desc.h[level];
const int & d_in_img_pitch = pyramid_description.desc.p[level];
// update the pixel positions according to the current level
cur_px.x = cur_px.x * inv_scale;
cur_px.y = cur_px.y * inv_scale;
// TODO : maybe load it into shared memory later if size is small (8x8), also the inverse hessian!
// Check if the inverse hessian was computed for the level successfully
if(isnan(d_hessian_data_bx[0])) {
continue;
}
const unsigned char * d_in_cur_img = pyramid_description.data[level];
// do the Lukas-Kanade on the actual level, using the reference patch
perform_lk<T, affine_est_offset, affine_est_gain>(
min_update_squared,
d_in_img_width,
d_in_img_height,
d_in_img_pitch,
d_in_cur_img,
patch_size,
half_patch_size,
patch_stride,
d_patch_data_bx,
d_hessian_data_bx,
cur_px,
cur_alpha_beta,
converged,
go_to_next_level);
}
if(threadIdx.x == 0) {
if(converged) {
// Point location
d_cur_px_bx.x = cur_px.x;
d_cur_px_bx.y = cur_px.y;
// Alpha-beta estimation
if(affine_est_gain) {
d_cur_alpha_beta_bx.x = cur_alpha_beta.x;
}
if(affine_est_offset) {
d_cur_alpha_beta_bx.y = cur_alpha_beta.y;
}
// Bearing vector
// TODO
(void)d_cur_f_bx;
// Disparity
d_cur_disparity_bx = sqrtf((cur_px.x - d_first_px_bx.x)*(cur_px.x - d_first_px_bx.x) +
(cur_px.y - d_first_px_bx.y)*(cur_px.y - d_first_px_bx.y));
} else {
// Initialize output to be NAN
d_cur_px_bx.x = __int_as_float(0x7fffffff);
d_cur_px_bx.y = __int_as_float(0x7fffffff);
}
}
}
}
__host__ void track_features(const bool affine_est_offset,
const bool affine_est_gain,
const int candidate_num,
const int min_level,
const int max_level,
const float min_update_squared,
const image_pyramid_descriptor_t & pyramid_description,
const pyramid_patch_descriptor_t & pyramid_patch_sizes,
const int * d_indir_data,
const unsigned char * d_patch_data,
const float * d_hessian_data,
const float2 * d_in_first_px,
float2 * d_in_cur_px,
float2 * d_in_cur_alpha_beta,
float4 * d_in_cur_f,
float * d_in_cur_disparity,
hipStream_t stream) {
// Kernel parameters
dim3 threads_per_block;
threads_per_block.x = WARP_SIZE;
threads_per_block.y = CANDIDATES_PER_BLOCK_TRACK;
threads_per_block.z = 1;
const int blocks_per_grid = (candidate_num + CANDIDATES_PER_BLOCK_TRACK - 1) / CANDIDATES_PER_BLOCK_TRACK;
const int shm_per_block = 0; //CANDIDATES_PER_BLOCK_TRACK*(PATCH_STRIDE*(PATCH_SIZE+2))*sizeof(REFERENCE_PATCH_TYPE);
// Launch kernel
if(affine_est_offset && affine_est_gain) {
hipLaunchKernelGGL(( track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,true>), dim3(blocks_per_grid),dim3(threads_per_block),shm_per_block,stream,
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else if(affine_est_offset) {
hipLaunchKernelGGL(( track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,false>), dim3(blocks_per_grid),dim3(threads_per_block),shm_per_block,stream,
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else if(affine_est_gain) {
hipLaunchKernelGGL(( track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,true>), dim3(blocks_per_grid),dim3(threads_per_block),shm_per_block,stream,
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else {
hipLaunchKernelGGL(( track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,false>), dim3(blocks_per_grid),dim3(threads_per_block),shm_per_block,stream,
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
}
CUDA_KERNEL_CHECK();
}
/* Precalculate patches & precalculate inverse Hessians --------------------- */
template<typename T>
__device__ __inline__ bool load_ref_patch(const unsigned char * __restrict__ d_in_ref_img,
const float2 & ref_px,
const int & img_width,
const int & img_height,
const int & img_pitch,
const int & patch_size,
const int & half_patch_size,
T * __restrict__ d_out_ref_patch) {
// Calculate top left corner
// and verify that the patch fits in the image
float x_tl_f = ref_px.x - (half_patch_size+1);
float y_tl_f = ref_px.y - (half_patch_size+1);
int x_tl_i = floorf(x_tl_f);
int y_tl_i = floorf(y_tl_f);
// Note: on the right side of the rectangle, there's a +1 because of the
// intensity interpolation
if(x_tl_i < 0 ||
y_tl_i < 0 ||
#if REFERENCE_PATCH_INTERPOLATION
(x_tl_i+patch_size+2)>=img_width ||
(y_tl_i+patch_size+2)>=img_height
#else
(x_tl_i+patch_size+1)>=img_width ||
(y_tl_i+patch_size+1)>=img_height
#endif /* REFERENCE_PATCH_INTERPOLATION */
) {
return false;
}
#if REFERENCE_PATCH_INTERPOLATION
float subpix_x = x_tl_f-x_tl_i;
float subpix_y = y_tl_f-y_tl_i;
float wTL = (1.0f-subpix_x)*(1.0f-subpix_y);
float wTR = subpix_x*(1.0f-subpix_y);
float wBL = (1.0f-subpix_x)*subpix_y;
float wBR = 1.0f - wTL - wTR - wBL;
#endif /* REFERENCE_PATCH_INTERPOLATION */
T * patch_ptr = d_out_ref_patch + threadIdx.x;
#pragma unroll
for(int id = threadIdx.x; id < (patch_size+2)*(patch_size+2); id += WARP_SIZE, patch_ptr += WARP_SIZE) {
int x_no_offs = (id % (patch_size+2));
int y_no_offs = (id / (patch_size+2));
int xi = x_no_offs + x_tl_i;
int yi = y_no_offs + y_tl_i;
const unsigned char * ptr = d_in_ref_img + yi*img_pitch + xi;
*patch_ptr =
#if REFERENCE_PATCH_INTERPOLATION
(T)(wTL*((float)ptr[0]) +
wBL*((float)ptr[img_pitch]) +
wTR*((float)ptr[1]) +
wBR*((float)ptr[img_pitch+1]));
#else
(T)ptr[0];
#endif /* REFERENCE_PATCH_INTERPOLATION */
}
return true;
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__device__ __inline__ void calc_hessian(const int & img_width,
const int & img_height,
const int & img_pitch,
const T * ref_patch,
const int & patch_size,
const int & half_patch_size,
const int & patch_stride,
float * __restrict__ d_inv_hessian) {
/*
* We're exploiting the fact that H is going to be symmetric !
* - When we estimate also affine offset AND gain:
* J = [ 0 1 2 3 ]
* H = | 0 1 2 3 |
* | x 4 5 6 |
* | x x 7 8 |
* | x x x 9 |
*
* - When we estimate only affine offset OR gain
* J = [ 0 1 2 ]
* H = | 0 1 2 |
* | x 3 4 |
* | x x 5 |
*
* - When we don't estimate the affine offset and gain
* J = [ 0 1 ]
* H = | 0 1 |
* | x 2 |
*/
float H[10];
float J[4];
#pragma unroll
for(int i=0;i<10;++i) {
H[i] = 0.0f;
}
const int patch_area = patch_size * patch_size;
const int x = threadIdx.x % patch_size;
const int y = threadIdx.x / patch_size;
const T * it_ref_start = ref_patch + (y+1)*patch_stride + (x+1); // +1 due to the 1-1-1-1 borders
const T * it_ref = it_ref_start;
const int it_ref_offset = patch_stride * WARP_SIZE / patch_size;
const int pixels_per_thread = patch_area/WARP_SIZE;
#pragma unroll
for(int i=0;i<pixels_per_thread;++i,it_ref+=it_ref_offset) {
// Compute J
J[0] = 0.5f * (it_ref[1] - it_ref[-1]);
J[1] = 0.5f * (it_ref[patch_stride] - it_ref[-patch_stride]);
// Affine parameter estimation
if(affine_est_offset && affine_est_gain) {
J[2] = 1.0f;
J[3] = it_ref[0];
} else if(affine_est_offset) {
J[2] = 1.0f;
} else if(affine_est_gain) {
J[2] = it_ref[0];
}
// H += J*J^Transpose (using the fact that J*J^T is going to be symmetric)
if(affine_est_offset && affine_est_gain) {
/*
* H: 4x4 matrix
* 0 1 2 3
* 0 (0x0) * (0x1) (0x2) (0x3)
* 1 x (1x1) (1x2) (1x3)
* 2 x x (2x2) (2x3)
* 3 x x x (3x3)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[0]*J[2];
H[3] += J[0]*J[3];
H[4] += J[1]*J[1];
H[5] += J[1]*J[2];
H[6] += J[1]*J[3];
H[7] += J[2]*J[2];
H[8] += J[2]*J[3];
H[9] += J[3]*J[3];
} else if(affine_est_offset || affine_est_gain) {
/*
* H: 3x3 matrix
* 0 1 2
* 0 (0x0) (0x1) (0x2)
* 1 x (1x1) (1x2)
* 2 x x (2x2)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[0]*J[2];
H[3] += J[1]*J[1];
H[4] += J[1]*J[2];
H[5] += J[2]*J[2];
} else {
/*
       * H: 2x2 matrix
* 0 1
* 0 (0x0) (0x1)
* 1 x (1x1)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[1]*J[1];
}
}
// Reduce it down to lane 0
#pragma unroll
for(int offset = WARP_SIZE/2; offset > 0; offset /= 2) {
#pragma unroll
for(int i=0;i<10;++i) {
H[i] += __shfl_down_sync(WARP_MASK, H[i], offset);
}
}
// Calculate the inverse of H
float inv_detH;
if(threadIdx.x == 0) {
if(affine_est_gain && affine_est_offset) {
// Inverse of a symmetric 4x4 matrix
inv_detH = 1.0f /
(H[0]*H[4]*H[7]*H[9] + 2*H[0]*H[5]*H[6]*H[8] + H[1]*H[1]*H[8]*H[8]
+ 2*H[1]*H[2]*H[5]*H[9] + 2*H[1]*H[3]*H[6]*H[7]
+ 2*H[2]*H[3]*H[4]*H[8] + H[2]*H[2]*H[6]*H[6] + H[3]*H[3]*H[5]*H[5]
- H[0]*H[4]*H[8]*H[8] - H[0]*H[5]*H[5]*H[9] - H[0]*H[6]*H[6]*H[7]
- H[1]*H[1]*H[7]*H[9] - 2*H[1]*H[3]*H[5]*H[8] - 2*H[1]*H[2]*H[6]*H[8]
- H[2]*H[2]*H[4]*H[9] - 2*H[2]*H[3]*H[5]*H[6] - H[3]*H[3]*H[4]*H[7]);
d_inv_hessian[0] = (H[4]*H[7]*H[9] + 2*H[5]*H[6]*H[8] - H[4]*H[8]*H[8] - H[5]*H[5]*H[9] - H[6]*H[6]*H[7]) * inv_detH;
d_inv_hessian[1] = (H[1]*H[8]*H[8] + H[2]*H[5]*H[9] + H[3]*H[6]*H[7] - H[1]*H[7]*H[9] - H[2]*H[6]*H[8] - H[3]*H[5]*H[8]) * inv_detH;
d_inv_hessian[2] = (H[1]*H[5]*H[9] + H[2]*H[6]*H[6] + H[3]*H[4]*H[8] - H[1]*H[6]*H[8] - H[2]*H[4]*H[9] - H[3]*H[5]*H[6]) * inv_detH;
d_inv_hessian[3] = (H[1]*H[6]*H[7] + H[2]*H[4]*H[8] + H[3]*H[5]*H[5] - H[1]*H[5]*H[8] - H[2]*H[6]*H[5] - H[3]*H[4]*H[7]) * inv_detH;
d_inv_hessian[4] = (H[0]*H[7]*H[9] + 2*H[2]*H[3]*H[8] - H[0]*H[8]*H[8] - H[2]*H[2]*H[9] - H[3]*H[3]*H[7]) * inv_detH;
d_inv_hessian[5] = (H[0]*H[6]*H[8] + H[1]*H[2]*H[9] + H[3]*H[3]*H[5] - H[0]*H[5]*H[9] - H[2]*H[3]*H[6] - H[1]*H[3]*H[8]) * inv_detH;
d_inv_hessian[6] = (H[0]*H[5]*H[8] + H[2]*H[2]*H[6] + H[1]*H[3]*H[7] - H[0]*H[6]*H[7] - H[1]*H[2]*H[8] - H[2]*H[3]*H[5]) * inv_detH;
d_inv_hessian[7] = (H[0]*H[4]*H[9] + 2*H[1]*H[3]*H[6] - H[0]*H[6]*H[6] - H[1]*H[1]*H[9] - H[3]*H[3]*H[4]) * inv_detH;
d_inv_hessian[8] = (H[0]*H[5]*H[6] + H[1]*H[1]*H[8] + H[2]*H[3]*H[4] - H[0]*H[4]*H[8] - H[1]*H[2]*H[6] - H[1]*H[3]*H[5]) * inv_detH;
d_inv_hessian[9] = (H[0]*H[4]*H[7] + 2*H[1]*H[2]*H[5] - H[0]*H[5]*H[5] - H[1]*H[1]*H[7] - H[2]*H[2]*H[4]) * inv_detH;
} else if(affine_est_gain || affine_est_offset) {
// Inverse of a symmetric 3x3 matrix
inv_detH = 1.0f / (H[0]*H[3]*H[5] + 2*H[1]*H[4]*H[2] - H[0]*H[4]*H[4] - H[2]*H[3]*H[2] - H[1]*H[1]*H[5]);
d_inv_hessian[0] = (H[3]*H[5] - H[4]*H[4]) * inv_detH;
d_inv_hessian[1] = (H[2]*H[4] - H[1]*H[5]) * inv_detH;
d_inv_hessian[2] = (H[1]*H[4] - H[2]*H[3]) * inv_detH;
d_inv_hessian[3] = (H[0]*H[5] - H[2]*H[2]) * inv_detH;
d_inv_hessian[4] = (H[1]*H[2] - H[0]*H[4]) * inv_detH;
d_inv_hessian[5] = (H[0]*H[3] - H[1]*H[1]) * inv_detH;
} else {
// Inverse of a symmetric 2x2 matrix
inv_detH = 1.0f / (H[0]*H[2] - H[1]*H[1]);
d_inv_hessian[0] = H[2] * inv_detH;
d_inv_hessian[1] = -1.0f * H[1] * inv_detH;
d_inv_hessian[2] = H[0] * inv_detH;
}
}
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__global__ void update_tracks_kernel(const int candidate_num,
const int min_level,
const int max_level,
const image_pyramid_descriptor_t pyramid_description,
const pyramid_patch_descriptor_t pyramid_patch_sizes,
const int * __restrict__ d_indir_data,
const float2 * __restrict__ d_in_ref_px,
T * __restrict__ d_patch_data,
float * __restrict__ d_hessian_data) {
const int cx = blockIdx.x * CANDIDATES_PER_BLOCK_UPDATE + threadIdx.y; // candidate id
const int pyramid_levels = max_level - min_level + 1; // number of pyramid levels computed
if(cx < candidate_num) {
// Get the buffer id
const int bx = d_indir_data[cx];
// Metadata array size: 64 bytes
const float2 & ref_px_bx = d_in_ref_px[bx<<3];
// Patch data
T * d_patch_data_bx = d_patch_data + pyramid_patch_sizes.max_area*pyramid_levels*bx;
// Hessian data
float * d_hessian_data_bx = d_hessian_data + 10*pyramid_levels*bx;
// For all the levels, precompute the interpolated patch, and the resulting inverse Hessian
for(int level=max_level;
level>=min_level;
--level,d_patch_data_bx+=pyramid_patch_sizes.max_area,d_hessian_data_bx+=10) {
const float scale = (float)(1<<level);
const float inv_scale = 1.0f/scale;
const float2 ref_px_scaled = {.x = ref_px_bx.x * inv_scale, .y = ref_px_bx.y * inv_scale};
const unsigned char * img_ref = pyramid_description.data[level];
const int img_width = pyramid_description.desc.w[level];
const int img_height = pyramid_description.desc.h[level];
const int img_pitch = pyramid_description.desc.p[level];
const int patch_size = pyramid_patch_sizes.wh[level];
const int half_patch_size = patch_size >> 1;
const int patch_stride = patch_size + 2;
// Create the reference patch if possible with borders
if(load_ref_patch<T>(img_ref,
ref_px_scaled,
img_width,
img_height,
img_pitch,
patch_size,
half_patch_size,
d_patch_data_bx) == false) {
// To notify the subsequent kernel of this behaviour,
// use the 0th index Hessian
d_hessian_data_bx[0] = __int_as_float(0x7fffffff);
continue;
}
__syncwarp();
// Calculate the Hessian and its inverse
calc_hessian<T,affine_est_offset,affine_est_gain>(img_width,
img_height,
img_pitch,
d_patch_data_bx,
patch_size,
half_patch_size,
patch_stride,
d_hessian_data_bx);
}
}
}
__host__ void update_tracks(const int candidate_num,
const bool affine_est_offset,
const bool affine_est_gain,
const int min_level,
const int max_level,
const image_pyramid_descriptor_t & pyramid_description,
const pyramid_patch_descriptor_t & pyramid_patch_sizes,
const int * d_indir_data,
const float2 * d_in_ref_px,
unsigned char * d_patch_data,
float * d_hessian_data,
hipStream_t stream) {
dim3 threads_per_block;
threads_per_block.x = WARP_SIZE;
threads_per_block.y = CANDIDATES_PER_BLOCK_UPDATE;
threads_per_block.z = 1;
const int blocks_per_grid = (candidate_num + CANDIDATES_PER_BLOCK_UPDATE - 1) / CANDIDATES_PER_BLOCK_UPDATE;
// Launch kernel
if(affine_est_offset && affine_est_gain) {
hipLaunchKernelGGL(( update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,true>), dim3(blocks_per_grid),dim3(threads_per_block),0,stream,
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else if(affine_est_offset) {
hipLaunchKernelGGL(( update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,false>), dim3(blocks_per_grid),dim3(threads_per_block),0,stream,
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else if(affine_est_gain) {
hipLaunchKernelGGL(( update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,true>), dim3(blocks_per_grid),dim3(threads_per_block),0,stream,
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else {
hipLaunchKernelGGL(( update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,false>), dim3(blocks_per_grid),dim3(threads_per_block),0,stream,
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
}
CUDA_KERNEL_CHECK();
}
} // namespace feature_tracker_cuda_tools
} // namespace vilib
| d7634eab56b9691776b75ef2701103cc772e52e3.cu | /*
* CUDA kernels for the feature tracker
* feature_tracker_cuda_tools.cu
*
* Copyright (c) 2019-2020 Balazs Nagy,
* Robotics and Perception Group, University of Zurich
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vilib/feature_tracker/feature_tracker_cuda_tools.h"
#include "vilib/feature_tracker/config.h"
#include "vilib/cuda_common.h"
namespace vilib {
namespace feature_tracker_cuda_tools {
// Warp preliminaries
#define WARP_SIZE 32
#define WARP_MASK 0xFFFFFFFF
// Precalculating feature patches
#define CANDIDATES_PER_BLOCK_UPDATE 3
/*
* Note to future self:
* - interestingly with reference patch interpolation, the tracking performance
* degrades. -> DISABLE
* - Int/Float does not seem to affect the tracking performance, but the float
* version is slower. -> INT
*/
#define REFERENCE_PATCH_INTERPOLATION 0
#define CANDIDATES_PER_BLOCK_TRACK 2
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__device__ __inline__ void perform_lk(const float & min_update_squared,
const int & img_width,
const int & img_height,
const int & img_pitch,
const unsigned char * __restrict__ d_in_cur_img,
const int & patch_size,
const int & half_patch_size,
const int & patch_stride,
const T * ref_patch,
const float * invH,
float2 & cur_px,
float2 & cur_alpha_beta,
bool & converged,
bool & go_to_next_level) {
converged = false;
go_to_next_level = false;
// Reference patch & actual image
const int patch_area = patch_size * patch_size;
const int x = threadIdx.x % patch_size;
const int y = threadIdx.x / patch_size;
const T * it_ref_start = ref_patch + (y+1)*patch_stride + (x+1);
const int it_ref_offset = patch_stride*WARP_SIZE/patch_size;
const unsigned char * it_start = (const unsigned char*) d_in_cur_img -
(img_pitch + 1)*half_patch_size + threadIdx.x +
(img_pitch - patch_size)*(threadIdx.x/patch_size);
const int it_offset = img_pitch*WARP_SIZE/patch_size;
const int pixels_per_thread = patch_area/WARP_SIZE;
#pragma unroll
for(int iter=0; iter<FEATURE_TRACKER_MAX_ITERATION_COUNT;++iter) {
if(isnan(cur_px.x) || isnan(cur_px.y)) {
break;
}
int u_r = floorf(cur_px.x);
int v_r = floorf(cur_px.y);
if(u_r < half_patch_size ||
v_r < half_patch_size ||
u_r >= (img_width-half_patch_size) ||
v_r >= (img_height-half_patch_size)) {
// don't change the state 'converged'
go_to_next_level = true;
break;
}
// compute interpolation weights
float subpix_x = cur_px.x-u_r;
float subpix_y = cur_px.y-v_r;
float wTL = (1.0f-subpix_x)*(1.0f-subpix_y);
float wTR = subpix_x * (1.0f-subpix_y);
float wBL = (1.0f-subpix_x)*subpix_y;
float wBR = subpix_x * subpix_y;
float Jres[4];
#pragma unroll
for(int i=0;i<4;++i) {
Jres[i] = 0.0f;
}
const uint8_t * it = it_start + u_r + v_r*img_pitch;
const T * it_ref = it_ref_start;
// Note: every thread computes (PATCH_SIZE*PATCH_SIZE/WARP_SIZE) pixels
#pragma unroll
for(int i=0;i<pixels_per_thread;++i,it+=it_offset,it_ref+=it_ref_offset) {
// Note it cannot be read as uchar2, because it would require proper alignment
float search_pixel = wTL*it[0] + wTR*it[1] + wBL*it[img_pitch] + wBR*it[img_pitch+1];
float res = search_pixel - (1.0f+cur_alpha_beta.x)*(*it_ref) - cur_alpha_beta.y;
Jres[0] += res * 0.5f * (it_ref[1] - it_ref[-1]);
Jres[1] += res * 0.5f * (it_ref[patch_stride] - it_ref[-patch_stride]);
// If affine compensation is used,
// set Jres with respect to affine parameters.
if(affine_est_offset && affine_est_gain) {
Jres[2] += res;
Jres[3] += res*(*it_ref);
} else if(affine_est_offset) {
Jres[2] += res;
} else if(affine_est_gain) {
Jres[2] += res*(*it_ref);
}
}
// Reduce it to all lanes
#pragma unroll
for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) {
#pragma unroll
for(int i=0;i<4;++i) {
Jres[i] += __shfl_xor_sync(WARP_MASK, Jres[i], offset);
}
}
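// Note (illustrative): the XOR butterfly above leaves the complete warp sum in every
// lane. For example, 4 lanes holding {1,2,3,4}:
//   offset 2 -> {1+3, 2+4, 3+1, 4+2} = {4,6,4,6}
//   offset 1 -> {4+6, 6+4, 4+6, 6+4} = {10,10,10,10}
// All lanes need the full Jres, since every lane applies the update below; the
// __shfl_down_sync reduction in calc_hessian() instead only guarantees the sum in lane 0.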
//update = Hinv * Jres
//broadcast the computed values in the warp from lane 0
float update[4];
if(affine_est_offset && affine_est_gain) {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1] + invH[2] * Jres[2] + invH[3] * Jres[3];
update[1] = invH[1] * Jres[0] + invH[4] * Jres[1] + invH[5] * Jres[2] + invH[6] * Jres[3];
update[2] = invH[2] * Jres[0] + invH[5] * Jres[1] + invH[7] * Jres[2] + invH[8] * Jres[3];
update[3] = invH[3] * Jres[0] + invH[6] * Jres[1] + invH[8] * Jres[2] + invH[9] * Jres[3];
} else if(affine_est_gain || affine_est_offset) {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1] + invH[2] * Jres[2];
update[1] = invH[1] * Jres[0] + invH[3] * Jres[1] + invH[4] * Jres[2];
update[2] = invH[2] * Jres[0] + invH[4] * Jres[1] + invH[5] * Jres[2];
} else {
update[0] = invH[0] * Jres[0] + invH[1] * Jres[1];
update[1] = invH[1] * Jres[0] + invH[2] * Jres[1];
}
// Updating our estimates
// Translational displacement
cur_px.x -= update[0];
cur_px.y -= update[1];
// Illumination changes
if(affine_est_offset && affine_est_gain) {
cur_alpha_beta.x += update[3];
cur_alpha_beta.y += update[2];
} else if (affine_est_offset) {
cur_alpha_beta.y += update[2];
} else if(affine_est_gain) {
cur_alpha_beta.x += update[2];
}
if(update[0]*update[0]+update[1]*update[1] < min_update_squared) {
converged=true;
break;
}
}
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__global__ void track_features_kernel(const int candidate_num,
const int min_level,
const int max_level,
const float min_update_squared,
const image_pyramid_descriptor_t pyramid_description,
const pyramid_patch_descriptor_t pyramid_patch_sizes,
const int * __restrict__ d_indir_data,
const T * __restrict__ d_patch_data,
const float * __restrict__ d_hessian_data,
const float2 * __restrict__ d_first_px,
float2 * __restrict__ d_cur_px,
float2 * __restrict__ d_cur_alpha_beta,
float4 * __restrict__ d_cur_f,
float * __restrict__ d_cur_disparity) {
const int cx = blockIdx.x * CANDIDATES_PER_BLOCK_TRACK + threadIdx.y; // candidate id
const int pyramid_levels = max_level - min_level + 1; // number of pyramid levels computed
if(cx < candidate_num) {
// Acquire buffer id for the candidate
const int bx = d_indir_data[cx];
// Initialize input and output references
// Remark: struct size: 64 bytes
// Tracking
float2 & d_cur_px_bx = d_cur_px[bx<<3]; // level 0
float2 & d_cur_alpha_beta_bx = d_cur_alpha_beta[bx<<3];
// Bearing vector
float4 & d_cur_f_bx = d_cur_f[bx<<2];
// Disparity calculation
const float2 & d_first_px_bx = d_first_px[bx<<3];
float & d_cur_disparity_bx = d_cur_disparity[bx<<4];
// Patch data
const T * d_patch_data_bx = d_patch_data + pyramid_patch_sizes.max_area*pyramid_levels*bx; // points to max_level
// Hessian data
const float * d_hessian_data_bx = d_hessian_data + 10*pyramid_levels*bx; // points to max_level
/*
* Iterate through all the selected levels,
* and refine the current patch location
*/
bool converged = false;
bool go_to_next_level = true;
float2 cur_px = d_cur_px_bx;
float2 cur_alpha_beta = d_cur_alpha_beta_bx;
float scale = 1.0f;
for(int level=max_level;
(converged || go_to_next_level) && level>=min_level;
cur_px.x *= scale,cur_px.y *= scale,--level,d_patch_data_bx+=pyramid_patch_sizes.max_area,d_hessian_data_bx+=10) {
// scale & patch size
scale = (float)(1<<level);
const float inv_scale = 1.0f/scale;
const int & patch_size = pyramid_patch_sizes.wh[level];
const int & half_patch_size = patch_size >> 1;
const int & patch_stride = patch_size + 2;
// image size
const int & d_in_img_width = pyramid_description.desc.w[level];
const int & d_in_img_height = pyramid_description.desc.h[level];
const int & d_in_img_pitch = pyramid_description.desc.p[level];
// update the pixel positions according to the current level
cur_px.x = cur_px.x * inv_scale;
cur_px.y = cur_px.y * inv_scale;
// TODO : maybe load it into shared memory later if size is small (8x8), also the inverse hessian!
// Check if the inverse hessian was computed for the level successfully
if(isnan(d_hessian_data_bx[0])) {
continue;
}
const unsigned char * d_in_cur_img = pyramid_description.data[level];
// do the Lukas-Kanade on the actual level, using the reference patch
perform_lk<T, affine_est_offset, affine_est_gain>(
min_update_squared,
d_in_img_width,
d_in_img_height,
d_in_img_pitch,
d_in_cur_img,
patch_size,
half_patch_size,
patch_stride,
d_patch_data_bx,
d_hessian_data_bx,
cur_px,
cur_alpha_beta,
converged,
go_to_next_level);
}
if(threadIdx.x == 0) {
if(converged) {
// Point location
d_cur_px_bx.x = cur_px.x;
d_cur_px_bx.y = cur_px.y;
// Alpha-beta estimation
if(affine_est_gain) {
d_cur_alpha_beta_bx.x = cur_alpha_beta.x;
}
if(affine_est_offset) {
d_cur_alpha_beta_bx.y = cur_alpha_beta.y;
}
// Bearing vector
// TODO
(void)d_cur_f_bx;
// Disparity
d_cur_disparity_bx = sqrtf((cur_px.x - d_first_px_bx.x)*(cur_px.x - d_first_px_bx.x) +
(cur_px.y - d_first_px_bx.y)*(cur_px.y - d_first_px_bx.y));
} else {
// Initialize output to be NAN
d_cur_px_bx.x = __int_as_float(0x7fffffff);
d_cur_px_bx.y = __int_as_float(0x7fffffff);
}
}
}
}
__host__ void track_features(const bool affine_est_offset,
const bool affine_est_gain,
const int candidate_num,
const int min_level,
const int max_level,
const float min_update_squared,
const image_pyramid_descriptor_t & pyramid_description,
const pyramid_patch_descriptor_t & pyramid_patch_sizes,
const int * d_indir_data,
const unsigned char * d_patch_data,
const float * d_hessian_data,
const float2 * d_in_first_px,
float2 * d_in_cur_px,
float2 * d_in_cur_alpha_beta,
float4 * d_in_cur_f,
float * d_in_cur_disparity,
cudaStream_t stream) {
// Kernel parameters
dim3 threads_per_block;
threads_per_block.x = WARP_SIZE;
threads_per_block.y = CANDIDATES_PER_BLOCK_TRACK;
threads_per_block.z = 1;
const int blocks_per_grid = (candidate_num + CANDIDATES_PER_BLOCK_TRACK - 1) / CANDIDATES_PER_BLOCK_TRACK;
const int shm_per_block = 0; //CANDIDATES_PER_BLOCK_TRACK*(PATCH_STRIDE*(PATCH_SIZE+2))*sizeof(REFERENCE_PATCH_TYPE);
// Launch kernel
if(affine_est_offset && affine_est_gain) {
track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,true><<<blocks_per_grid,threads_per_block,shm_per_block,stream>>>(
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else if(affine_est_offset) {
track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,false><<<blocks_per_grid,threads_per_block,shm_per_block,stream>>>(
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else if(affine_est_gain) {
track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,true><<<blocks_per_grid,threads_per_block,shm_per_block,stream>>>(
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
} else {
track_features_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,false><<<blocks_per_grid,threads_per_block,shm_per_block,stream>>>(
candidate_num,
min_level,
max_level,
min_update_squared,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data,
d_in_first_px,
d_in_cur_px,
d_in_cur_alpha_beta,
d_in_cur_f,
d_in_cur_disparity
);
}
CUDA_KERNEL_CHECK();
}
/* Precalculate patches & precalculate inverse Hessians --------------------- */
template<typename T>
__device__ __inline__ bool load_ref_patch(const unsigned char * __restrict__ d_in_ref_img,
const float2 & ref_px,
const int & img_width,
const int & img_height,
const int & img_pitch,
const int & patch_size,
const int & half_patch_size,
T * __restrict__ d_out_ref_patch) {
// Calculate top left corner
// and verify that the patch fits in the image
float x_tl_f = ref_px.x - (half_patch_size+1);
float y_tl_f = ref_px.y - (half_patch_size+1);
int x_tl_i = floorf(x_tl_f);
int y_tl_i = floorf(y_tl_f);
// Note: on the right side of the rectangle, there's a +1 because of the
// intensity interpolation
if(x_tl_i < 0 ||
y_tl_i < 0 ||
#if REFERENCE_PATCH_INTERPOLATION
(x_tl_i+patch_size+2)>=img_width ||
(y_tl_i+patch_size+2)>=img_height
#else
(x_tl_i+patch_size+1)>=img_width ||
(y_tl_i+patch_size+1)>=img_height
#endif /* REFERENCE_PATCH_INTERPOLATION */
) {
return false;
}
#if REFERENCE_PATCH_INTERPOLATION
float subpix_x = x_tl_f-x_tl_i;
float subpix_y = y_tl_f-y_tl_i;
float wTL = (1.0f-subpix_x)*(1.0f-subpix_y);
float wTR = subpix_x*(1.0f-subpix_y);
float wBL = (1.0f-subpix_x)*subpix_y;
float wBR = 1.0f - wTL - wTR - wBL;
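// Note (illustrative): wBR = 1 - wTL - wTR - wBL reduces algebraically to
// subpix_x*subpix_y, so the four bilinear weights always sum to exactly 1.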
#endif /* REFERENCE_PATCH_INTERPOLATION */
T * patch_ptr = d_out_ref_patch + threadIdx.x;
#pragma unroll
for(int id = threadIdx.x; id < (patch_size+2)*(patch_size+2); id += WARP_SIZE, patch_ptr += WARP_SIZE) {
int x_no_offs = (id % (patch_size+2));
int y_no_offs = (id / (patch_size+2));
int xi = x_no_offs + x_tl_i;
int yi = y_no_offs + y_tl_i;
const unsigned char * ptr = d_in_ref_img + yi*img_pitch + xi;
*patch_ptr =
#if REFERENCE_PATCH_INTERPOLATION
(T)(wTL*((float)ptr[0]) +
wBL*((float)ptr[img_pitch]) +
wTR*((float)ptr[1]) +
wBR*((float)ptr[img_pitch+1]));
#else
(T)ptr[0];
#endif /* REFERENCE_PATCH_INTERPOLATION */
}
return true;
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__device__ __inline__ void calc_hessian(const int & img_width,
const int & img_height,
const int & img_pitch,
const T * ref_patch,
const int & patch_size,
const int & half_patch_size,
const int & patch_stride,
float * __restrict__ d_inv_hessian) {
/*
* We're exploiting the fact that H is going to be symmetric !
* - When we estimate also affine offset AND gain:
* J = [ 0 1 2 3 ]
* H = | 0 1 2 3 |
* | x 4 5 6 |
* | x x 7 8 |
* | x x x 9 |
*
* - When we estimate only affine offset OR gain
* J = [ 0 1 2 ]
* H = | 0 1 2 |
* | x 3 4 |
* | x x 5 |
*
* - When we don't estimate the affine offset and gain
* J = [ 0 1 ]
* H = | 0 1 |
* | x 2 |
*/
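/*
* Note (illustrative): for an n x n symmetric matrix packed row-wise like this, the flat
* index of element (i, j) with i <= j is i*n + j - i*(i+1)/2. In the 4x4 case that gives
* (1,2) -> 1*4 + 2 - 1 = 5 and (3,3) -> 3*4 + 3 - 6 = 9, matching the sketch above.
*/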
float H[10];
float J[4];
#pragma unroll
for(int i=0;i<10;++i) {
H[i] = 0.0f;
}
const int patch_area = patch_size * patch_size;
const int x = threadIdx.x % patch_size;
const int y = threadIdx.x / patch_size;
const T * it_ref_start = ref_patch + (y+1)*patch_stride + (x+1); // +1 due to the 1-1-1-1 borders
const T * it_ref = it_ref_start;
const int it_ref_offset = patch_stride * WARP_SIZE / patch_size;
const int pixels_per_thread = patch_area/WARP_SIZE;
#pragma unroll
for(int i=0;i<pixels_per_thread;++i,it_ref+=it_ref_offset) {
// Compute J
J[0] = 0.5f * (it_ref[1] - it_ref[-1]);
J[1] = 0.5f * (it_ref[patch_stride] - it_ref[-patch_stride]);
// Affine parameter estimation
if(affine_est_offset && affine_est_gain) {
J[2] = 1.0f;
J[3] = it_ref[0];
} else if(affine_est_offset) {
J[2] = 1.0f;
} else if(affine_est_gain) {
J[2] = it_ref[0];
}
// H += J*J^Transpose (using the fact that J*J^T is going to be symmetric)
if(affine_est_offset && affine_est_gain) {
/*
* H: 4x4 matrix
* 0 1 2 3
* 0 (0x0) * (0x1) (0x2) (0x3)
* 1 x (1x1) (1x2) (1x3)
* 2 x x (2x2) (2x3)
* 3 x x x (3x3)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[0]*J[2];
H[3] += J[0]*J[3];
H[4] += J[1]*J[1];
H[5] += J[1]*J[2];
H[6] += J[1]*J[3];
H[7] += J[2]*J[2];
H[8] += J[2]*J[3];
H[9] += J[3]*J[3];
} else if(affine_est_offset || affine_est_gain) {
/*
* H: 3x3 matrix
* 0 1 2
* 0 (0x0) (0x1) (0x2)
* 1 x (1x1) (1x2)
* 2 x x (2x2)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[0]*J[2];
H[3] += J[1]*J[1];
H[4] += J[1]*J[2];
H[5] += J[2]*J[2];
} else {
/*
* H: 2x2 matrix
* 0 1
* 0 (0x0) (0x1)
* 1 x (1x1)
*/
H[0] += J[0]*J[0];
H[1] += J[0]*J[1];
H[2] += J[1]*J[1];
}
}
// Reduce it down to lane 0
#pragma unroll
for(int offset = WARP_SIZE/2; offset > 0; offset /= 2) {
#pragma unroll
for(int i=0;i<10;++i) {
H[i] += __shfl_down_sync(WARP_MASK, H[i], offset);
}
}
// Calculate the inverse of H
float inv_detH;
if(threadIdx.x == 0) {
if(affine_est_gain && affine_est_offset) {
// Inverse of a symmetric 4x4 matrix
inv_detH = 1.0f /
(H[0]*H[4]*H[7]*H[9] + 2*H[0]*H[5]*H[6]*H[8] + H[1]*H[1]*H[8]*H[8]
+ 2*H[1]*H[2]*H[5]*H[9] + 2*H[1]*H[3]*H[6]*H[7]
+ 2*H[2]*H[3]*H[4]*H[8] + H[2]*H[2]*H[6]*H[6] + H[3]*H[3]*H[5]*H[5]
- H[0]*H[4]*H[8]*H[8] - H[0]*H[5]*H[5]*H[9] - H[0]*H[6]*H[6]*H[7]
- H[1]*H[1]*H[7]*H[9] - 2*H[1]*H[3]*H[5]*H[8] - 2*H[1]*H[2]*H[6]*H[8]
- H[2]*H[2]*H[4]*H[9] - 2*H[2]*H[3]*H[5]*H[6] - H[3]*H[3]*H[4]*H[7]);
d_inv_hessian[0] = (H[4]*H[7]*H[9] + 2*H[5]*H[6]*H[8] - H[4]*H[8]*H[8] - H[5]*H[5]*H[9] - H[6]*H[6]*H[7]) * inv_detH;
d_inv_hessian[1] = (H[1]*H[8]*H[8] + H[2]*H[5]*H[9] + H[3]*H[6]*H[7] - H[1]*H[7]*H[9] - H[2]*H[6]*H[8] - H[3]*H[5]*H[8]) * inv_detH;
d_inv_hessian[2] = (H[1]*H[5]*H[9] + H[2]*H[6]*H[6] + H[3]*H[4]*H[8] - H[1]*H[6]*H[8] - H[2]*H[4]*H[9] - H[3]*H[5]*H[6]) * inv_detH;
d_inv_hessian[3] = (H[1]*H[6]*H[7] + H[2]*H[4]*H[8] + H[3]*H[5]*H[5] - H[1]*H[5]*H[8] - H[2]*H[6]*H[5] - H[3]*H[4]*H[7]) * inv_detH;
d_inv_hessian[4] = (H[0]*H[7]*H[9] + 2*H[2]*H[3]*H[8] - H[0]*H[8]*H[8] - H[2]*H[2]*H[9] - H[3]*H[3]*H[7]) * inv_detH;
d_inv_hessian[5] = (H[0]*H[6]*H[8] + H[1]*H[2]*H[9] + H[3]*H[3]*H[5] - H[0]*H[5]*H[9] - H[2]*H[3]*H[6] - H[1]*H[3]*H[8]) * inv_detH;
d_inv_hessian[6] = (H[0]*H[5]*H[8] + H[2]*H[2]*H[6] + H[1]*H[3]*H[7] - H[0]*H[6]*H[7] - H[1]*H[2]*H[8] - H[2]*H[3]*H[5]) * inv_detH;
d_inv_hessian[7] = (H[0]*H[4]*H[9] + 2*H[1]*H[3]*H[6] - H[0]*H[6]*H[6] - H[1]*H[1]*H[9] - H[3]*H[3]*H[4]) * inv_detH;
d_inv_hessian[8] = (H[0]*H[5]*H[6] + H[1]*H[1]*H[8] + H[2]*H[3]*H[4] - H[0]*H[4]*H[8] - H[1]*H[2]*H[6] - H[1]*H[3]*H[5]) * inv_detH;
d_inv_hessian[9] = (H[0]*H[4]*H[7] + 2*H[1]*H[2]*H[5] - H[0]*H[5]*H[5] - H[1]*H[1]*H[7] - H[2]*H[2]*H[4]) * inv_detH;
} else if(affine_est_gain || affine_est_offset) {
// Inverse of a symmetric 3x3 matrix
inv_detH = 1.0f / (H[0]*H[3]*H[5] + 2*H[1]*H[4]*H[2] - H[0]*H[4]*H[4] - H[2]*H[3]*H[2] - H[1]*H[1]*H[5]);
d_inv_hessian[0] = (H[3]*H[5] - H[4]*H[4]) * inv_detH;
d_inv_hessian[1] = (H[2]*H[4] - H[1]*H[5]) * inv_detH;
d_inv_hessian[2] = (H[1]*H[4] - H[2]*H[3]) * inv_detH;
d_inv_hessian[3] = (H[0]*H[5] - H[2]*H[2]) * inv_detH;
d_inv_hessian[4] = (H[1]*H[2] - H[0]*H[4]) * inv_detH;
d_inv_hessian[5] = (H[0]*H[3] - H[1]*H[1]) * inv_detH;
} else {
// Inverse of a symmetric 2x2 matrix
inv_detH = 1.0f / (H[0]*H[2] - H[1]*H[1]);
d_inv_hessian[0] = H[2] * inv_detH;
d_inv_hessian[1] = -1.0f * H[1] * inv_detH;
d_inv_hessian[2] = H[0] * inv_detH;
}
}
}
template <typename T, const bool affine_est_offset, const bool affine_est_gain>
__global__ void update_tracks_kernel(const int candidate_num,
const int min_level,
const int max_level,
const image_pyramid_descriptor_t pyramid_description,
const pyramid_patch_descriptor_t pyramid_patch_sizes,
const int * __restrict__ d_indir_data,
const float2 * __restrict__ d_in_ref_px,
T * __restrict__ d_patch_data,
float * __restrict__ d_hessian_data) {
const int cx = blockIdx.x * CANDIDATES_PER_BLOCK_UPDATE + threadIdx.y; // candidate id
const int pyramid_levels = max_level - min_level + 1; // number of pyramid levels computed
if(cx < candidate_num) {
// Get the buffer id
const int bx = d_indir_data[cx];
// Metadata array size: 64 bytes
const float2 & ref_px_bx = d_in_ref_px[bx<<3];
// Patch data
T * d_patch_data_bx = d_patch_data + pyramid_patch_sizes.max_area*pyramid_levels*bx;
// Hessian data
float * d_hessian_data_bx = d_hessian_data + 10*pyramid_levels*bx;
// For all the levels, precompute the interpolated patch, and the resulting inverse Hessian
for(int level=max_level;
level>=min_level;
--level,d_patch_data_bx+=pyramid_patch_sizes.max_area,d_hessian_data_bx+=10) {
const float scale = (float)(1<<level);
const float inv_scale = 1.0f/scale;
const float2 ref_px_scaled = {.x = ref_px_bx.x * inv_scale, .y = ref_px_bx.y * inv_scale};
const unsigned char * img_ref = pyramid_description.data[level];
const int img_width = pyramid_description.desc.w[level];
const int img_height = pyramid_description.desc.h[level];
const int img_pitch = pyramid_description.desc.p[level];
const int patch_size = pyramid_patch_sizes.wh[level];
const int half_patch_size = patch_size >> 1;
const int patch_stride = patch_size + 2;
// Create the reference patch if possible with borders
if(load_ref_patch<T>(img_ref,
ref_px_scaled,
img_width,
img_height,
img_pitch,
patch_size,
half_patch_size,
d_patch_data_bx) == false) {
// To notify the subsequent kernel of this behaviour,
// use the 0th index Hessian
d_hessian_data_bx[0] = __int_as_float(0x7fffffff);
continue;
}
__syncwarp();
// Calculate the Hessian and its inverse
calc_hessian<T,affine_est_offset,affine_est_gain>(img_width,
img_height,
img_pitch,
d_patch_data_bx,
patch_size,
half_patch_size,
patch_stride,
d_hessian_data_bx);
}
}
}
__host__ void update_tracks(const int candidate_num,
const bool affine_est_offset,
const bool affine_est_gain,
const int min_level,
const int max_level,
const image_pyramid_descriptor_t & pyramid_description,
const pyramid_patch_descriptor_t & pyramid_patch_sizes,
const int * d_indir_data,
const float2 * d_in_ref_px,
unsigned char * d_patch_data,
float * d_hessian_data,
cudaStream_t stream) {
dim3 threads_per_block;
threads_per_block.x = WARP_SIZE;
threads_per_block.y = CANDIDATES_PER_BLOCK_UPDATE;
threads_per_block.z = 1;
const int blocks_per_grid = (candidate_num + CANDIDATES_PER_BLOCK_UPDATE - 1) / CANDIDATES_PER_BLOCK_UPDATE;
// Launch kernel
if(affine_est_offset && affine_est_gain) {
update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,true><<<blocks_per_grid,threads_per_block,0,stream>>>(
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else if(affine_est_offset) {
update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,true,false><<<blocks_per_grid,threads_per_block,0,stream>>>(
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else if(affine_est_gain) {
update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,true><<<blocks_per_grid,threads_per_block,0,stream>>>(
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
} else {
update_tracks_kernel<FEATURE_TRACKER_REFERENCE_PATCH_TYPE,false,false><<<blocks_per_grid,threads_per_block,0,stream>>>(
candidate_num,
min_level,
max_level,
pyramid_description,
pyramid_patch_sizes,
d_indir_data,
d_in_ref_px,
(FEATURE_TRACKER_REFERENCE_PATCH_TYPE*)d_patch_data,
d_hessian_data);
}
CUDA_KERNEL_CHECK();
}
} // namespace feature_tracker_cuda_tools
} // namespace vilib
|
755ea1cb9284cda21f0040c0b5b1e19f27e20113.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
using Point = double3;
struct Ref {
Point* pos;
Point* dir;
double* distance;
};
struct View {
int size;
Point* pos;
Point* dir;
double* distance;
__device__ Ref operator[](int i) const {
return {pos + i, dir + i, distance + i};
}
};
__device__ inline void move_impl(const Ref& ref) {
const double nextdist = *ref.distance;
ref.pos->x += ref.dir->x * nextdist;
ref.pos->y += ref.dir->y * nextdist;
ref.pos->z += ref.dir->z * nextdist;
}
__global__ void move(View view) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < view.size) {
move_impl(view[idx]);
}
}
| 755ea1cb9284cda21f0040c0b5b1e19f27e20113.cu | using Point = double3;
struct Ref {
Point* pos;
Point* dir;
double* distance;
};
struct View {
int size;
Point* pos;
Point* dir;
double* distance;
__device__ Ref operator[](int i) const {
return {pos + i, dir + i, distance + i};
}
};
__device__ inline void move_impl(const Ref& ref) {
const double nextdist = *ref.distance;
ref.pos->x += ref.dir->x * nextdist;
ref.pos->y += ref.dir->y * nextdist;
ref.pos->z += ref.dir->z * nextdist;
}
__global__ void move(View view) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < view.size) {
move_impl(view[idx]);
}
}
|
beca52fe62dde351d3a54bc7ace2c3ecf8dad9ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <hipfft.h>
// includes, project
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <iostream>
typedef float2 Complex;
//hsl=-2./((pi^2)*(4*n.*n-1))/dYL;
struct genKernel
{
float _dYL;
genKernel(const float _dyl):_dYL(_dyl){}
__host__ __device__ float operator()(float n) const
{
return -2.0 / (9.869604401089358 * (4.0 * n * n - 1.0)) / _dYL; // 9.869604401089358 = pi^2
}
};
// Generate the Hilbert Filtering Kernel
__global__ void copyKernel(hipfftComplex* output,const float* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id].x = HS[id];
output[id].y = 0;
}
}
__global__ void copyKernel(Complex* output, const hipfftComplex* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id].x = HS[id].x;
output[id].y = HS[id].y;
}
}
__global__ void copyKernel(float* output, const hipfftComplex* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id] = HS[id].x;
}
}
int genHilbertKer(
thrust::device_vector<hipfftComplex>& FtS,
const int YL,
const float dYL)
{
thrust::device_vector<float> n(2 * YL + 1, 0);
thrust::sequence(n.begin(),n.end(),static_cast<float>(-YL));
thrust::transform(n.begin(),n.end(),n.begin(),genKernel(dYL));
const int NN = pow(2.0,ceil(log2(YL * 3.0)));
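// Note (illustrative): NN is the FFT length -- the smallest power of two that is at
// least 3*YL; e.g. YL = 240 gives 3*YL = 720 and hence NN = 1024.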
FtS.resize(NN);
thrust::device_vector<float> HS(NN,0);
thrust::copy(n.begin(),n.begin() + YL,HS.end() - YL);
thrust::copy(n.begin()+YL,n.end(),HS.begin());
// To Do the CUFFT
hipfftHandle plan;
dim3 blk(1024);
dim3 gid((NN + blk.x - 1) / blk.x);
hipLaunchKernelGGL(( copyKernel), dim3(gid),dim3(blk), 0, 0, thrust::raw_pointer_cast(&FtS[0]),thrust::raw_pointer_cast(&HS[0]), NN);
hipfftPlan1d(&plan, NN, HIPFFT_C2C,1);
hipfftExecC2C(plan, thrust::raw_pointer_cast(&FtS[0]), thrust::raw_pointer_cast(&FtS[0]), HIPFFT_FORWARD);
hipfftDestroy(plan);
HS.clear();
n.clear();
return NN;
}
__global__ void copyExpandedProjectionData(hipfftComplex* output, const float* input,
const int YL, const int ZL, const int ViewN, const int NN)
{
int curPos = threadIdx.x + blockIdx.x * blockDim.x;
int curBatch = threadIdx.y + blockIdx.y * blockDim.y;
if(curPos < YL && curBatch < ZL * ViewN)
{
output[curBatch * NN + curPos].x = input[curBatch * YL + curPos];
output[curBatch * NN + curPos].y = 0;
}
}
__global__ void multiplyProjectionWithKernel(hipfftComplex* proj, const hipfftComplex* kernel, const int kernelLength, const int batchSize)
{
int kerIdx = threadIdx.x + blockIdx.x * blockDim.x;
int batIdx = threadIdx.y + blockIdx.y * blockDim.y;
//__shared__ float kk[KSIZE];
//kk[threadIdx.x] = kernel[kerIdx];
//__syncthreads();
if(kerIdx < kernelLength && batIdx < batchSize)
{
hipfftComplex kk = kernel[kerIdx];
hipfftComplex res;
res.x = proj[batIdx * kernelLength + kerIdx].x * kk.x - proj[batIdx * kernelLength + kerIdx].y * kk.y;
res.y = proj[batIdx * kernelLength + kerIdx].x * kk.y + proj[batIdx * kernelLength + kerIdx].y * kk.x;
proj[batIdx * kernelLength + kerIdx] = res;
//proj[batIdx * kernelLength + kerIdx].y *= kk;
}
}
__global__ void cutProjectionData(float* fpwd, hipfftComplex* proj, const int YL, const int NN, const int batSize)
{
int curIdx = threadIdx.x + blockIdx.x * blockDim.x;
int batIdx = threadIdx.y + blockIdx.y * blockDim.y;
if(curIdx < YL && batIdx < batSize)
{
fpwd[batIdx * YL + curIdx] = proj[batIdx * NN + curIdx].x / NN;
}
}
// Let the projection data stored in the addressing order:
// 1. detector cell transversal direction (YL)
// 2. vertical direction (ZL)
// 3. view index (ViewN)
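// Note (illustrative): with this ordering the sample (y, z, v) sits at the flat index
//   idx = (v * ZL + z) * YL + y,
// which is the indexing used in preWeighting_ker below: Proj[(v * ZL + k) * YL + j].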
void filtering(
thrust::device_vector<float>& fpwd, // Filtered projection data
const thrust::device_vector<float>& Proj, // Projection data
const int YL, const int ZL, const int ViewN, // Size of the projection data
const float dYL)
{
thrust::device_vector<hipfftComplex> FtS;
int NN = genHilbertKer(FtS, YL, dYL);
//Expand the projection data
thrust::device_vector<hipfftComplex> exProj(NN * ZL * ViewN);
dim3 copyExpBlk(32,32);
dim3 copyExpGid(
(YL + copyExpBlk.x - 1) / copyExpBlk.x,
(ZL * ViewN + copyExpBlk.y - 1) / copyExpBlk.y);
hipLaunchKernelGGL(( copyExpandedProjectionData), dim3(copyExpGid), dim3(copyExpBlk), 0, 0,
thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&Proj[0]),
YL, ZL, ViewN, NN);
// Forward Batch FFT
hipfftHandle plan;
hipfftPlan1d(&plan, NN, HIPFFT_C2C, ZL * ViewN);
hipfftExecC2C(plan, thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&exProj[0]),HIPFFT_FORWARD);
// Multiply with the kernel
dim3 multBlk(32,32);
dim3 multGid(
(NN + multBlk.x - 1) / multBlk.x,
(ZL * ViewN + multBlk.y - 1) / multBlk.y);
hipLaunchKernelGGL(( multiplyProjectionWithKernel), dim3(multGid),dim3(multBlk), 0, 0, thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&FtS[0]), NN, ZL * ViewN);
// Back batch FFT
hipfftExecC2C(plan, thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&exProj[0]),HIPFFT_BACKWARD);
// Cut the data
hipLaunchKernelGGL(( cutProjectionData), dim3(copyExpGid), dim3(copyExpBlk), 0, 0, thrust::raw_pointer_cast(&fpwd[0]),
thrust::raw_pointer_cast(&exProj[0]),
YL, NN, ZL * ViewN);
hipfftDestroy(plan);
FtS.clear();
exProj.clear();
}
//void filtering(float* hfpwd, const float* hProj, const int YL, const int ZL, const int ViewN, const float dYL)
//{
// thrust::device_vector<float> fpwd(YL * ZL * ViewN, 0);
// thrust::device_vector<float> Proj(hProj, hProj + YL * ZL * ViewN);
// filtering(fpwd,Proj, YL, ZL, ViewN, dYL);
// thrust::copy(fpwd.begin(),fpwd.end(),hfpwd);
//}
__global__ void preWeighting_ker(float* Proj,
const int YL,
const int ZL,
const int ViewN,
const float PLC,
const float ZLC,
const float dYL,
const float dZL,
const float SO)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int v = threadIdx.z + blockIdx.z * blockDim.z;
if(j < YL && k < ZL && v < ViewN)
{
const float t = (j - PLC) * dYL;
const float b = (k - ZLC) * dZL;
const float wei = SO * SO / sqrtf(SO * SO * (SO * SO + b * b) - b * t * b * t);
Proj[(v * ZL + k) * YL + j] *= wei;
}
}
void preWeighting(
thrust::device_vector<float>& Proj,
const int YL,
const int ZL,
const int ViewN,
const float PLC,
const float ZLC,
const float dYL,
const float dZL,
const float SO)
{
dim3 blk(16,4,4);
dim3 gid(
(YL + blk.x - 1) / blk.x,
(ZL + blk.y - 1) / blk.y,
(ViewN + blk.z - 1) / blk.z);
hipLaunchKernelGGL(( preWeighting_ker), dim3(gid),dim3(blk), 0, 0,
thrust::raw_pointer_cast(&Proj[0]),
YL, ZL, ViewN, PLC, ZLC, dYL, dZL, SO);
}
//////////////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void addressOrder(
T* proj_ZYV,
const T* proj_YZV,
const int YL, const int ZL, const int ViewN)
{
int zIdx = threadIdx.x + blockIdx.x * blockDim.x;
int yIdx = threadIdx.y + blockIdx.y * blockDim.y;
int vIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(zIdx < ZL && yIdx < YL && vIdx < ViewN)
{
proj_ZYV[(vIdx * YL + yIdx) * ZL + zIdx] =
proj_YZV[(vIdx * ZL + zIdx) * YL + yIdx];
}
}
template<typename T>
__global__ void addressOrder_2(
T* proj_YZV,
const T* proj_ZYV,
const int YL, const int ZL, const int ViewN)
{
int yIdx = threadIdx.x + blockIdx.x * blockDim.x;
int zIdx = threadIdx.y + blockIdx.y * blockDim.y;
int vIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(zIdx < ZL && yIdx < YL && vIdx < ViewN)
{
proj_YZV[(vIdx * ZL + zIdx) * YL + yIdx]
= proj_ZYV[(vIdx * YL + yIdx) * ZL + zIdx];
}
}
extern "C"
void filtering(float* hfpwd,
const float* hProj,
const int YL, const int ZL, const int ViewN,
const float PLC, const float ZLC,
const float dYL, const float dZL,
const float SO)
{
thrust::device_vector<float> Proj(hProj, hProj + YL * ZL * ViewN);
preWeighting(Proj, YL, ZL, ViewN, PLC,
ZLC, dYL, dZL, SO);
thrust::device_vector<float> fpwd(YL * ZL * ViewN, 0);
filtering(fpwd,Proj, YL, ZL, ViewN, dYL);
thrust::copy(fpwd.begin(),fpwd.end(),hfpwd);
fpwd.clear();
Proj.clear();
}
| beca52fe62dde351d3a54bc7ace2c3ecf8dad9ec.cu |
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <cufft.h>
// includes, project
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <iostream>
typedef float2 Complex;
//hsl=-2./((pi^2)*(4*n.*n-1))/dYL;
struct genKernel
{
float _dYL;
genKernel(const float _dyl):_dYL(_dyl){}
__host__ __device__ float operator()(float n) const
{
return -2.0 / (9.869604401089358 * (4.0 * n * n - 1.0)) / _dYL; // 9.869604401089358 = pi^2
}
};
// Generate the Hilbert Filtering Kernel
__global__ void copyKernel(cufftComplex* output,const float* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id].x = HS[id];
output[id].y = 0;
}
}
__global__ void copyKernel(Complex* output, const cufftComplex* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id].x = HS[id].x;
output[id].y = HS[id].y;
}
}
__global__ void copyKernel(float* output, const cufftComplex* HS, const int NN)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < NN)
{
output[id] = HS[id].x;
}
}
int genHilbertKer(
thrust::device_vector<cufftComplex>& FtS,
const int YL,
const float dYL)
{
thrust::device_vector<float> n(2 * YL + 1, 0);
thrust::sequence(n.begin(),n.end(),static_cast<float>(-YL));
thrust::transform(n.begin(),n.end(),n.begin(),genKernel(dYL));
const int NN = pow(2.0,ceil(log2(YL * 3.0)));
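// Note (illustrative): NN is the FFT length -- the smallest power of two that is at
// least 3*YL; e.g. YL = 240 gives 3*YL = 720 and hence NN = 1024.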
FtS.resize(NN);
thrust::device_vector<float> HS(NN,0);
thrust::copy(n.begin(),n.begin() + YL,HS.end() - YL);
thrust::copy(n.begin()+YL,n.end(),HS.begin());
// To Do the CUFFT
cufftHandle plan;
dim3 blk(1024);
dim3 gid((NN + blk.x - 1) / blk.x);
copyKernel<<<gid,blk>>>(thrust::raw_pointer_cast(&FtS[0]),thrust::raw_pointer_cast(&HS[0]), NN);
cufftPlan1d(&plan, NN, CUFFT_C2C,1);
cufftExecC2C(plan, thrust::raw_pointer_cast(&FtS[0]), thrust::raw_pointer_cast(&FtS[0]), CUFFT_FORWARD);
cufftDestroy(plan);
HS.clear();
n.clear();
return NN;
}
__global__ void copyExpandedProjectionData(cufftComplex* output, const float* input,
const int YL, const int ZL, const int ViewN, const int NN)
{
int curPos = threadIdx.x + blockIdx.x * blockDim.x;
int curBatch = threadIdx.y + blockIdx.y * blockDim.y;
if(curPos < YL && curBatch < ZL * ViewN)
{
output[curBatch * NN + curPos].x = input[curBatch * YL + curPos];
output[curBatch * NN + curPos].y = 0;
}
}
__global__ void multiplyProjectionWithKernel(cufftComplex* proj, const cufftComplex* kernel, const int kernelLength, const int batchSize)
{
int kerIdx = threadIdx.x + blockIdx.x * blockDim.x;
int batIdx = threadIdx.y + blockIdx.y * blockDim.y;
//__shared__ float kk[KSIZE];
//kk[threadIdx.x] = kernel[kerIdx];
//__syncthreads();
if(kerIdx < kernelLength && batIdx < batchSize)
{
cufftComplex kk = kernel[kerIdx];
cufftComplex res;
res.x = proj[batIdx * kernelLength + kerIdx].x * kk.x - proj[batIdx * kernelLength + kerIdx].y * kk.y;
res.y = proj[batIdx * kernelLength + kerIdx].x * kk.y + proj[batIdx * kernelLength + kerIdx].y * kk.x;
proj[batIdx * kernelLength + kerIdx] = res;
//proj[batIdx * kernelLength + kerIdx].y *= kk;
}
}
__global__ void cutProjectionData(float* fpwd, cufftComplex* proj, const int YL, const int NN, const int batSize)
{
int curIdx = threadIdx.x + blockIdx.x * blockDim.x;
int batIdx = threadIdx.y + blockIdx.y * blockDim.y;
if(curIdx < YL && batIdx < batSize)
{
fpwd[batIdx * YL + curIdx] = proj[batIdx * NN + curIdx].x / NN;
}
}
// Let the projection data stored in the addressing order:
// 1. detector cell transversal direction (YL)
// 2. vertical direction (ZL)
// 3. view index (ViewN)
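// Note (illustrative): with this ordering the sample (y, z, v) sits at the flat index
//   idx = (v * ZL + z) * YL + y,
// which is the indexing used in preWeighting_ker below: Proj[(v * ZL + k) * YL + j].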
void filtering(
thrust::device_vector<float>& fpwd, // Filtered projection data
const thrust::device_vector<float>& Proj, // Projection data
const int YL, const int ZL, const int ViewN, // Size of the projection data
const float dYL)
{
thrust::device_vector<cufftComplex> FtS;
int NN = genHilbertKer(FtS, YL, dYL);
//Expand the projection data
thrust::device_vector<cufftComplex> exProj(NN * ZL * ViewN);
dim3 copyExpBlk(32,32);
dim3 copyExpGid(
(YL + copyExpBlk.x - 1) / copyExpBlk.x,
(ZL * ViewN + copyExpBlk.y - 1) / copyExpBlk.y);
copyExpandedProjectionData<<<copyExpGid, copyExpBlk>>>(
thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&Proj[0]),
YL, ZL, ViewN, NN);
// Forward Batch FFT
cufftHandle plan;
cufftPlan1d(&plan, NN, CUFFT_C2C, ZL * ViewN);
cufftExecC2C(plan, thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&exProj[0]),CUFFT_FORWARD);
// Multiply with the kernel
dim3 multBlk(32,32);
dim3 multGid(
(NN + multBlk.x - 1) / multBlk.x,
(ZL * ViewN + multBlk.y - 1) / multBlk.y);
multiplyProjectionWithKernel<<<multGid,multBlk>>>(thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&FtS[0]), NN, ZL * ViewN);
// Back batch FFT
cufftExecC2C(plan, thrust::raw_pointer_cast(&exProj[0]),
thrust::raw_pointer_cast(&exProj[0]),CUFFT_INVERSE);
// Cut the data
cutProjectionData<<<copyExpGid, copyExpBlk>>>(thrust::raw_pointer_cast(&fpwd[0]),
thrust::raw_pointer_cast(&exProj[0]),
YL, NN, ZL * ViewN);
cufftDestroy(plan);
FtS.clear();
exProj.clear();
}
//void filtering(float* hfpwd, const float* hProj, const int YL, const int ZL, const int ViewN, const float dYL)
//{
// thrust::device_vector<float> fpwd(YL * ZL * ViewN, 0);
// thrust::device_vector<float> Proj(hProj, hProj + YL * ZL * ViewN);
// filtering(fpwd,Proj, YL, ZL, ViewN, dYL);
// thrust::copy(fpwd.begin(),fpwd.end(),hfpwd);
//}
__global__ void preWeighting_ker(float* Proj,
const int YL,
const int ZL,
const int ViewN,
const float PLC,
const float ZLC,
const float dYL,
const float dZL,
const float SO)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int v = threadIdx.z + blockIdx.z * blockDim.z;
if(j < YL && k < ZL && v < ViewN)
{
const float t = (j - PLC) * dYL;
const float b = (k - ZLC) * dZL;
const float wei = SO * SO / sqrtf(SO * SO * (SO * SO + b * b) - b * t * b * t);
Proj[(v * ZL + k) * YL + j] *= wei;
}
}
void preWeighting(
thrust::device_vector<float>& Proj,
const int YL,
const int ZL,
const int ViewN,
const float PLC,
const float ZLC,
const float dYL,
const float dZL,
const float SO)
{
dim3 blk(16,4,4);
dim3 gid(
(YL + blk.x - 1) / blk.x,
(ZL + blk.y - 1) / blk.y,
(ViewN + blk.z - 1) / blk.z);
preWeighting_ker<<<gid,blk>>>(
thrust::raw_pointer_cast(&Proj[0]),
YL, ZL, ViewN, PLC, ZLC, dYL, dZL, SO);
}
//////////////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void addressOrder(
T* proj_ZYV,
const T* proj_YZV,
const int YL, const int ZL, const int ViewN)
{
int zIdx = threadIdx.x + blockIdx.x * blockDim.x;
int yIdx = threadIdx.y + blockIdx.y * blockDim.y;
int vIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(zIdx < ZL && yIdx < YL && vIdx < ViewN)
{
proj_ZYV[(vIdx * YL + yIdx) * ZL + zIdx] =
proj_YZV[(vIdx * ZL + zIdx) * YL + yIdx];
}
}
template<typename T>
__global__ void addressOrder_2(
T* proj_YZV,
const T* proj_ZYV,
const int YL, const int ZL, const int ViewN)
{
int yIdx = threadIdx.x + blockIdx.x * blockDim.x;
int zIdx = threadIdx.y + blockIdx.y * blockDim.y;
int vIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(zIdx < ZL && yIdx < YL && vIdx < ViewN)
{
proj_YZV[(vIdx * ZL + zIdx) * YL + yIdx]
= proj_ZYV[(vIdx * YL + yIdx) * ZL + zIdx];
}
}
extern "C"
void filtering(float* hfpwd,
const float* hProj,
const int YL, const int ZL, const int ViewN,
const float PLC, const float ZLC,
const float dYL, const float dZL,
const float SO)
{
thrust::device_vector<float> Proj(hProj, hProj + YL * ZL * ViewN);
preWeighting(Proj, YL, ZL, ViewN, PLC,
ZLC, dYL, dZL, SO);
thrust::device_vector<float> fpwd(YL * ZL * ViewN, 0);
filtering(fpwd,Proj, YL, ZL, ViewN, dYL);
thrust::copy(fpwd.begin(),fpwd.end(),hfpwd);
fpwd.clear();
Proj.clear();
}
|
9b556bc10e3e412520d0b65898ca0af5898263e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/rocblas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
namespace chainerx {
namespace cuda {
namespace {
// Dispatch gemm routines based on the element type T
template <typename T>
struct Gemm;
template <>
struct Gemm<float> {
template <typename... Args>
void operator()(Args&&... args) const {
CheckCublasError(hipblasSgemm(std::forward<Args>(args)...));
}
};
template <>
struct Gemm<double> {
template <typename... Args>
void operator()(Args&&... args) const {
CheckCublasError(hipblasDgemm(std::forward<Args>(args)...));
}
};
struct GemmInputLayout {
int64_t ld = 0;
hipblasOperation_t trans = HIPBLAS_OP_T;
// Configure leading dimension and transposition accordingly, and makes the array C contiguous if necessary.
Array Configure(const Array& a) {
CHAINERX_ASSERT(a.ndim() == 2);
// Row-major
// Note that this condition is slightly more relaxed than Array::IsContiguous(), which requires
// a.strides()[0] == a.GetItemSize() * a.shape()[1]
if (a.strides()[1] == a.GetItemSize() && a.strides()[0] / a.GetItemSize() >= a.shape()[1] &&
a.strides()[0] % a.GetItemSize() == 0) {
ld = a.strides()[0] / a.GetItemSize();
trans = HIPBLAS_OP_N; // transposed
return a;
}
// Column-major
if (a.strides()[0] == a.GetItemSize() && a.strides()[1] / a.GetItemSize() >= a.shape()[0] &&
a.strides()[1] % a.GetItemSize() == 0) {
ld = a.strides()[1] / a.GetItemSize();
return a;
}
// Force row-major contiguous
ld = a.shape()[1];
trans = HIPBLAS_OP_N; // transposed
return internal::AsContiguous(a);
}
};
} // namespace
void CudaDevice::Dot(const Array& a, const Array& b, const Array& out) {
CheckDevicesCompatible(a, b, out);
CudaSetDeviceScope scope{index()};
if (GetKind(out.dtype()) != DtypeKind::kFloat) {
throw NotImplementedError("dot is not implemented for non-float types in CUDA");
}
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(b.ndim() == 2);
CHAINERX_ASSERT(out.ndim() == 2);
int64_t m = a.shape()[0];
int64_t k = a.shape()[1];
int64_t n = b.shape()[1];
CHAINERX_ASSERT(b.shape()[0] == k);
CHAINERX_ASSERT(out.shape()[0] == m);
CHAINERX_ASSERT(out.shape()[1] == n);
if (m == 1 && n == 1) {
// TODO(beam2d): Write a custom reduction kernel.
// TODO(hvy): Avoid unnecessary cast here when multiplication supports mixed dtypes.
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
Sum(a_cast.Reshape({k}) * b_cast.Reshape({k}), {0}, out.Reshape({}));
return;
}
if (out.dtype() == Dtype::kFloat16) {
// TODO(imanishi): Use hipblasHgemm
Array out_float32 = Empty(out.shape(), Dtype::kFloat32, *this);
Dot(a.AsType(Dtype::kFloat32), b.AsType(Dtype::kFloat32), out_float32);
AsType(out_float32, out);
return;
}
bool is_out_contiguous = out.IsContiguous();
Array out_contiguous = is_out_contiguous ? out : EmptyLike(out, *this);
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
auto gemm_impl = [&](auto pt) {
CHAINERX_ASSERT(a_cast.dtype() == out_contiguous.dtype());
CHAINERX_ASSERT(b_cast.dtype() == out_contiguous.dtype());
using T = typename decltype(pt)::type;
using StorageType = cuda_internal::StorageType<T>;
using CudaType = cuda_internal::DataType<T>;
// Note that cuBLAS uses Fortran order.
// To compute out = a x b, we use cuBLAS to compute out^T = b^T x a^T (here x is the matrix product).
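// Note (illustrative): a row-major (C-order) m x n buffer reinterpreted as column-major
// is the n x m transpose of the same data, so calling GEMM with (b, a) and dimensions
// (n, m, k) makes the BLAS write out^T in column-major order -- i.e. out in row-major order.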
GemmInputLayout a_cast_layout;
GemmInputLayout b_cast_layout;
Array a_cast_config = a_cast_layout.Configure(a_cast);
Array b_cast_config = b_cast_layout.Configure(b_cast);
const CudaType one{chainerx::Float16{1}};
const CudaType zero{chainerx::Float16{0}};
const CudaType* a_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(a_cast_config)));
const CudaType* b_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(b_cast_config)));
CudaType* out_ptr = &cuda_internal::StorageToDataType<T>(*static_cast<StorageType*>(internal::GetRawOffsetData(out_contiguous)));
std::lock_guard<std::mutex> lock{cublas_handle_mutex_};
Gemm<T>{}(
cublas_handle(),
b_cast_layout.trans,
a_cast_layout.trans,
n,
m,
k,
&one,
b_cast_ptr,
b_cast_layout.ld,
a_cast_ptr,
a_cast_layout.ld,
&zero,
out_ptr,
n);
};
switch (out.dtype()) {
case Dtype::kFloat32:
gemm_impl(PrimitiveType<float>{});
break;
case Dtype::kFloat64:
gemm_impl(PrimitiveType<double>{});
break;
default:
CHAINERX_NEVER_REACH();
}
if (!is_out_contiguous) {
Copy(out_contiguous, out);
}
}
} // namespace cuda
} // namespace chainerx
| 9b556bc10e3e412520d0b65898ca0af5898263e9.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/cublas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
namespace chainerx {
namespace cuda {
namespace {
// Dispatch gemm routines based on the element type T
template <typename T>
struct Gemm;
template <>
struct Gemm<float> {
template <typename... Args>
void operator()(Args&&... args) const {
CheckCublasError(cublasSgemm(std::forward<Args>(args)...));
}
};
template <>
struct Gemm<double> {
template <typename... Args>
void operator()(Args&&... args) const {
CheckCublasError(cublasDgemm(std::forward<Args>(args)...));
}
};
struct GemmInputLayout {
int64_t ld = 0;
cublasOperation_t trans = CUBLAS_OP_T;
// Configure leading dimension and transposition accordingly, and makes the array C contiguous if necessary.
Array Configure(const Array& a) {
CHAINERX_ASSERT(a.ndim() == 2);
// Row-major
// Note that this condition is slightly more relaxed than Array::IsContiguous(), which requires
// a.strides()[0] == a.GetItemSize() * a.shape()[1]
if (a.strides()[1] == a.GetItemSize() && a.strides()[0] / a.GetItemSize() >= a.shape()[1] &&
a.strides()[0] % a.GetItemSize() == 0) {
ld = a.strides()[0] / a.GetItemSize();
trans = CUBLAS_OP_N; // transposed
return a;
}
// Column-major
if (a.strides()[0] == a.GetItemSize() && a.strides()[1] / a.GetItemSize() >= a.shape()[0] &&
a.strides()[1] % a.GetItemSize() == 0) {
ld = a.strides()[1] / a.GetItemSize();
return a;
}
// Force row-major contiguous
ld = a.shape()[1];
trans = CUBLAS_OP_N; // transposed
return internal::AsContiguous(a);
}
};
} // namespace
void CudaDevice::Dot(const Array& a, const Array& b, const Array& out) {
CheckDevicesCompatible(a, b, out);
CudaSetDeviceScope scope{index()};
if (GetKind(out.dtype()) != DtypeKind::kFloat) {
throw NotImplementedError("dot is not implemented for non-float types in CUDA");
}
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(b.ndim() == 2);
CHAINERX_ASSERT(out.ndim() == 2);
int64_t m = a.shape()[0];
int64_t k = a.shape()[1];
int64_t n = b.shape()[1];
CHAINERX_ASSERT(b.shape()[0] == k);
CHAINERX_ASSERT(out.shape()[0] == m);
CHAINERX_ASSERT(out.shape()[1] == n);
if (m == 1 && n == 1) {
// TODO(beam2d): Write a custom reduction kernel.
// TODO(hvy): Avoid unnecessary cast here when multiplication supports mixed dtypes.
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
Sum(a_cast.Reshape({k}) * b_cast.Reshape({k}), {0}, out.Reshape({}));
return;
}
if (out.dtype() == Dtype::kFloat16) {
// TODO(imanishi): Use cublasHgemm
Array out_float32 = Empty(out.shape(), Dtype::kFloat32, *this);
Dot(a.AsType(Dtype::kFloat32), b.AsType(Dtype::kFloat32), out_float32);
AsType(out_float32, out);
return;
}
bool is_out_contiguous = out.IsContiguous();
Array out_contiguous = is_out_contiguous ? out : EmptyLike(out, *this);
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
auto gemm_impl = [&](auto pt) {
CHAINERX_ASSERT(a_cast.dtype() == out_contiguous.dtype());
CHAINERX_ASSERT(b_cast.dtype() == out_contiguous.dtype());
using T = typename decltype(pt)::type;
using StorageType = cuda_internal::StorageType<T>;
using CudaType = cuda_internal::DataType<T>;
// Note that cuBLAS uses Fortran order.
// To compute out = a x b, we use cuBLAS to compute out^T = b^T x a^T (here x is the matrix product).
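// Note (illustrative): a row-major (C-order) m x n buffer reinterpreted as column-major
// is the n x m transpose of the same data, so calling GEMM with (b, a) and dimensions
// (n, m, k) makes cuBLAS write out^T in column-major order -- i.e. out in row-major order.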
GemmInputLayout a_cast_layout;
GemmInputLayout b_cast_layout;
Array a_cast_config = a_cast_layout.Configure(a_cast);
Array b_cast_config = b_cast_layout.Configure(b_cast);
const CudaType one{chainerx::Float16{1}};
const CudaType zero{chainerx::Float16{0}};
const CudaType* a_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(a_cast_config)));
const CudaType* b_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(b_cast_config)));
CudaType* out_ptr = &cuda_internal::StorageToDataType<T>(*static_cast<StorageType*>(internal::GetRawOffsetData(out_contiguous)));
std::lock_guard<std::mutex> lock{cublas_handle_mutex_};
Gemm<T>{}(
cublas_handle(),
b_cast_layout.trans,
a_cast_layout.trans,
n,
m,
k,
&one,
b_cast_ptr,
b_cast_layout.ld,
a_cast_ptr,
a_cast_layout.ld,
&zero,
out_ptr,
n);
};
switch (out.dtype()) {
case Dtype::kFloat32:
gemm_impl(PrimitiveType<float>{});
break;
case Dtype::kFloat64:
gemm_impl(PrimitiveType<double>{});
break;
default:
CHAINERX_NEVER_REACH();
}
if (!is_out_contiguous) {
Copy(out_contiguous, out);
}
}
} // namespace cuda
} // namespace chainerx
|
5dc02a49829b05578ef5b4b67dff04b24a6ab289.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sync_conv_groups.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
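// Round the problem size up to the next multiple of the block size so the grid below
// covers it with whole blocks.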
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sync_conv_groups), dim3(gridBlock), dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sync_conv_groups), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sync_conv_groups), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5dc02a49829b05578ef5b4b67dff04b24a6ab289.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sync_conv_groups.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sync_conv_groups<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sync_conv_groups<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sync_conv_groups<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8b3fe1974fd39fd2bc4a5dafa41960280119fb2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 3
#define LS_X_BLOCK_MAX_X_SIZE 128
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_BOUNDARY_PERIODIC 1
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_CopyXWindowSitesPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_CopyXWindowApronsPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
void cu_CopyXWindowSitesPeriodic(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(cu_CopyXWindowSitesPeriodic_kernel, dim3(gridSize), dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_CopyXWindowSitesPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
// Copy the x window from shared memory to device memory.
copyXWindowToLattice(outLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
}
void cu_CopyXWindowApronsPeriodic(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(cu_CopyXWindowApronsPeriodic_kernel, dim3(gridSize), dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_CopyXWindowApronsPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
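// Every thread first zeroes its own output site; threads at the block edges then overwrite
// their output with the apron (halo) values staged in the shared window: the first
// LS_APRON_SIZE threads emit the leading apron and the last LS_APRON_SIZE threads emit the
// trailing apron.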
if (latticeXIndex < latticeXSize)
{
outLattice[latticeIndex] = 0;
outLattice[latticeIndex+latticeXYZSize] = 0;
// If this is the first part of the block, load the leading apron.
if (windowIndex < 2*LS_APRON_SIZE)
{
outLattice[latticeIndex] = window[windowIndex-LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex-LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
// If this is the last part of the block, load the trailing apron.
if (windowIndex >= blockDim.x)
{
outLattice[latticeIndex] = window[windowIndex+LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
}
}
| 8b3fe1974fd39fd2bc4a5dafa41960280119fb2a.cu | /*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 3
#define LS_X_BLOCK_MAX_X_SIZE 128
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_BOUNDARY_PERIODIC 1
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_CopyXWindowSitesPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_CopyXWindowApronsPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
void cu_CopyXWindowSitesPeriodic(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowSitesPeriodic_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_CopyXWindowSitesPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
// Copy the x window from shared memory to device memory.
copyXWindowToLattice(outLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
}
void cu_CopyXWindowApronsPeriodic(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowApronsPeriodic_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_CopyXWindowApronsPeriodic_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
if (latticeXIndex < latticeXSize)
{
outLattice[latticeIndex] = 0;
outLattice[latticeIndex+latticeXYZSize] = 0;
// If this is the first part of the block, load the leading apron.
if (windowIndex < 2*LS_APRON_SIZE)
{
outLattice[latticeIndex] = window[windowIndex-LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex-LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
// If this is the last part of the block, load the trailing apron.
if (windowIndex >= blockDim.x)
{
outLattice[latticeIndex] = window[windowIndex+LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
}
}
|
08fe5b31b2d1d970b3a1f341eaa97e5e3350bd67.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <math.h> // for isnan, isinf; cmath does not work here
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
//
int NVStrings::hash(unsigned int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->hash();
else
d_rtn[idx] = 0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(unsigned int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stoi(int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stoi();
else
d_rtn[idx] = 0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stol(long* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
long* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(long),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stol();
else
d_rtn[idx] = 0L;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(long)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stof(float* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
float* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(float),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stof();
else
d_rtn[idx] = (float)0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(float)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stod(double* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
double* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(double),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stod();
else
d_rtn[idx] = 0.0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(double)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::htoi(unsigned int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr || dstr->empty() )
{
d_rtn[idx] = 0;
return;
}
long result = 0, base = 1;
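// Accumulate base-16 place values from the last character backward; the input is
// expected to be bare hexadecimal digits (a leading 0x prefix is not handled).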
const char* str = dstr->data();
int len = dstr->size()-1;
for( int i=len; i >= 0; --i )
{
char ch = str[i];
if( ch >= '0' && ch <= '9' )
{
result += (long)(ch-48) * base;
base *= 16;
}
else if( ch >= 'A' && ch <= 'Z' )
{
result += (long)(ch-55) * base;
base *= 16;
}
else if( ch >= 'a' && ch <= 'z' )
{
result += (long)(ch-87) * base;
base *= 16;
}
}
d_rtn[idx] = (unsigned int)result;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
hipMemcpy(results,d_rtn,sizeof(unsigned int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
// build strings from given integers
NVStrings* NVStrings::itos(const int* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::itos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
int* d_values = (int*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(int),0);
hipMemcpy(d_values,values,count*sizeof(int),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
int value = d_values[idx];
int size = custring_view::ltos_size(value);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
int value = d_values[idx];
d_strings[idx] = custring_view::ltos(value,str);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
NVStrings* NVStrings::ltos(const long* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::ltos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
long* d_values = (long*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(long),0);
hipMemcpy(d_values,values,count*sizeof(long),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
long value = d_values[idx];
int size = custring_view::ltos_size(value);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
long value = d_values[idx];
d_strings[idx] = custring_view::ltos(value,str);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
struct ftos_converter
{
// significant digits is independent of scientific notation range
// digits more than this may require using long values instead of ints
const unsigned int significant_digits = 10;
// maximum power-of-10 that will fit in 32-bits
const unsigned int nine_digits = 1000000000; // 1x10^9
// Range of numbers here is for normalizing the value.
// If the value is above or below the following limits, the output is converted to
// scientific notation in order to show (at most) the number of significant digits.
const double upper_limit = 1000000000; // max is 1x10^9
const double lower_limit = 0.0001; // printf uses scientific notation below this
// Tables for doing normalization: converting to exponent form
// IEEE double float has maximum exponent of 305 so these should cover everthing
const double upper10[9] = { 10, 100, 10000, 1e8, 1e16, 1e32, 1e64, 1e128, 1e256 };
const double lower10[9] = { .1, .01, .0001, 1e-8, 1e-16, 1e-32, 1e-64, 1e-128, 1e-256 };
const double blower10[9] = { 1.0, .1, .001, 1e-7, 1e-15, 1e-31, 1e-63, 1e-127, 1e-255 };
// utility for quickly converting known integer range to character array
__device__ char* int2str( int value, char* output )
{
if( value==0 )
{
*output++ = '0';
return output;
}
char buffer[10]; // should be big-enough for 10 significant digits
char* ptr = buffer;
while( value > 0 )
{
*ptr++ = (char)('0' + (value % 10));
value /= 10;
}
while( ptr != buffer )
*output++ = *--ptr; // 54321 -> 12345
return output;
}
//
// dissect value into parts
// return decimal_places
__device__ int dissect_value( double value, unsigned int& integer, unsigned int& decimal, int& exp10 )
{
// dissect float into parts
int decimal_places = significant_digits-1;
// normalize step puts value between lower-limit and upper-limit
// by adjusting the exponent up or down
exp10 = 0;
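// The exponent is extracted by binary decomposition: upper10[idx] holds 10^(2^idx) and fx
// walks 256,128,...,1 (i.e. 2^idx), so each accepted step adds or subtracts 2^idx from exp10
// while scaling value back toward the [lower_limit, upper_limit) range.
// Example: 1.23456789e12 is scaled by 1e-8 (exp10 += 8) and then by 1e-4 (exp10 += 4),
// leaving ~1.23456789 with exp10 == 12.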
if( value > upper_limit )
{
int fx = 256;
for( int idx=8; idx >= 0; --idx )
{
if( value >= upper10[idx] )
{
value *= lower10[idx];
exp10 += fx;
}
fx = fx >> 1;
}
}
else if( (value > 0.0) && (value < lower_limit) )
{
int fx = 256;
for( int idx=8; idx >= 0; --idx )
{
if( value < blower10[idx] )
{
value *= upper10[idx];
exp10 -= fx;
}
fx = fx >> 1;
}
}
//
unsigned int max_digits = nine_digits;
integer = (unsigned int)value;
for( unsigned int i=integer; i >= 10; i/=10 )
{
--decimal_places;
max_digits /= 10;
}
double remainder = (value - (double)integer) * (double)max_digits;
//printf("remainder=%g,value=%g,integer=%u,sd=%u\n",remainder,value,integer,max_digits);
decimal = (unsigned int)remainder;
remainder -= (double)decimal;
//printf("remainder=%g,decimal=%u\n",remainder,decimal);
decimal += (unsigned int)(2.0*remainder);
if( decimal >= max_digits )
{
decimal = 0;
++integer;
if( exp10 && (integer >= 10) )
{
++exp10;
integer = 1;
}
}
//
while( (decimal % 10)==0 && (decimal_places > 0) )
{
decimal /= 10;
--decimal_places;
}
return decimal_places;
}
//
// Converts value to string into output
// Output need not be more than significant_digits+7
// 7 = 1 sign, 1 decimal point, 1 exponent ('e'), 1 exponent-sign, 3 digits for exponent
//
__device__ int float_to_string( double value, char* output )
{
// check for valid value
if( isnan(value) )
{
memcpy(output,"NaN",3);
return 3;
}
bool bneg = false;
if( value < 0.0 )
{
value = -value;
bneg = true;
}
if( isinf(value) )
{
if( bneg )
memcpy(output,"-Inf",4);
else
memcpy(output,"Inf",3);
return bneg ? 4 : 3;
}
// dissect value into components
unsigned int integer = 0, decimal = 0;
int exp10 = 0;
int decimal_places = dissect_value(value,integer,decimal,exp10);
//
// now build the string from the
// components: sign, integer, decimal, exp10, decimal_places
//
// sign
char* ptr = output;
if( bneg )
*ptr++ = '-';
// integer
ptr = int2str(integer,ptr);
// decimal
if( decimal_places )
{
*ptr++ = '.';
char buffer[10];
char* pb = buffer;
while( decimal_places-- )
{
*pb++ = (char)('0' + (decimal % 10));
decimal /= 10;
}
while( pb != buffer ) // reverses the digits
*ptr++ = *--pb; // e.g. 54321 -> 12345
}
// exponent
if( exp10 )
{
*ptr++ = 'e';
if( exp10 < 0 )
{
*ptr++ ='-';
exp10 = -exp10;
}
else
*ptr++ ='+';
if( exp10 < 10 )
*ptr++ = '0'; // extra zero-pad
ptr = int2str(exp10,ptr);
}
// done
//*ptr = 0; // null-terminator
return (int)(ptr-output);
}
// need to compute how much memory is needed to
// hold the output string (not including null)
__device__ int compute_ftos_size( double value )
{
if( isnan(value) )
return 3; // NaN
bool bneg = false;
if( value < 0.0 )
{
value = -value;
bneg = true;
}
if( isinf(value) )
return 3 + (int)bneg; // Inf
// dissect float into parts
unsigned int integer = 0, decimal = 0;
int exp10 = 0;
int decimal_places = dissect_value(value,integer,decimal,exp10);
// now count up the components
// sign
int count = (int)bneg;
// integer
count += (int)(integer==0);
while( integer > 0 )
{
integer /= 10;
++count;
} // log10(integer)
// decimal
if( decimal_places )
{
++count; // decimal point
count += decimal_places;
}
// exponent
if( exp10 )
{
count += 2; // 'e'
if( exp10 < 0 )
exp10 = -exp10;
count += (int)(exp10<10); // padding
while( exp10 > 0 )
{
exp10 /= 10;
++count;
} // log10(exp10)
}
return count;
}
};
// build strings from given floats
NVStrings* NVStrings::ftos(const float* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::ftos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
float* d_values = (float*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(float),0);
hipMemcpy(d_values,values,count*sizeof(float),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
float value = d_values[idx];
ftos_converter fts;
int bytes = fts.compute_ftos_size((double)value);
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
float value = d_values[idx];
ftos_converter fts;
int len = fts.float_to_string((double)value,str);
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
// build strings from given doubles
NVStrings* NVStrings::dtos(const double* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::dtos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
double* d_values = (double*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(double),0);
hipMemcpy(d_values,values,count*sizeof(double),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
double value = d_values[idx];
ftos_converter fts;
int bytes = fts.compute_ftos_size(value);
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
double value = d_values[idx];
ftos_converter fts;
int len = fts.float_to_string(value,str);
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
// convert IPv4 to integer
int NVStrings::ip2int( unsigned int* results, bool bdevmem )
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr || dstr->empty() )
{
d_rtn[idx] = 0;
return; // empty or null string
}
int tokens = dstr->split_size(".",1,0,-1);
if( tokens != 4 )
{
d_rtn[idx] = 0;
return; // invalid format
}
unsigned int vals[4];
unsigned int* pval = vals;
const char* str = dstr->data();
int len = dstr->size();
for( int i=0; i < len; ++i )
{
char ch = str[i];
if( ch >= '0' && ch <= '9' )
{
*pval *= 10;
*pval += (unsigned int)(ch-'0');
}
else if( ch=='.' )
{
++pval;
*pval = 0;
}
}
unsigned int result = (vals[0] * 16777216) + (vals[1] * 65536) + (vals[2] * 256) + vals[3];
d_rtn[idx] = result;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !bdevmem )
{
hipMemcpy(results,d_rtn,sizeof(unsigned int)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
NVStrings* NVStrings::int2ip( const unsigned int* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem )
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::int2ip values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
unsigned int* d_values = (unsigned int*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(unsigned int),0);
hipMemcpy(d_values,values,count*sizeof(unsigned int),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
unsigned int ipnum = d_values[idx];
int bytes = 3; // 3 dots: xxx.xxx.xxx.xxx
for( int j=0; j < 4; ++j )
{
unsigned int value = (ipnum & 255)+1; // don't want log(0)
bytes += (int)log10((double)value)+1; // number of base10 digits
ipnum = ipnum >> 8;
}
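// Using log10(octet+1) overestimates the digit count for octets 9 and 99, so bytes is a
// safe upper bound; the exact length is set when each string is built below.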
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_strings[idx] = nullptr;
return;
}
unsigned int ipnum = d_values[idx];
char* str = d_buffer + d_offsets[idx];
char* ptr = str;
for( int j=0; j < 4; ++j )
{
int value = ipnum & 255;
do {
char ch = '0' + (value % 10);
*ptr++ = ch;
value = value/10;
} while( value > 0 );
if( j < 3 )
*ptr++ = '.';
ipnum = ipnum >> 8;
}
int len = (int)(ptr-str);
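// The octets and their digits were emitted least-significant first, so the buffer is
// currently reversed; flip it in place to obtain the conventional dotted-decimal order.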
for( int j=0; j<(len/2); ++j )
{
char ch1 = str[j];
char ch2 = str[len-j-1];
str[j] = ch2;
str[len-j-1] = ch1;
}
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
int NVStrings::to_bools( bool* results, const char* true_string, bool bdevmem )
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
// copy parameter to device memory
char* d_true = nullptr;
int d_len = 0;
if( true_string )
{
d_len = (int)strlen(true_string);
RMM_ALLOC(&d_true,d_len+1,0);
hipMemcpy(d_true,true_string,d_len+1,hipMemcpyHostToDevice);
}
//
bool* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
// set the values
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_true, d_len, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->compare(d_true,d_len)==0;
else
d_rtn[idx] = (d_true==0); // let null be a thing
});
//
// calculate the number of falses (to include nulls too)
int falses = thrust::count(execpol->on(0),d_rtn,d_rtn+count,false);
if( !bdevmem )
{
hipMemcpy(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
if( d_true )
RMM_FREE(d_true,0);
return (int)count-falses;
}
NVStrings* NVStrings::create_from_bools(const bool* values, unsigned int count, const char* true_string, const char* false_string, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::create_from_bools values or count invalid");
if( true_string==0 || false_string==0 )
throw std::invalid_argument("nvstrings::create_from_bools false and true strings must not be null");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
int d_len_true = strlen(true_string);
char* d_true = nullptr;
RMM_ALLOC(&d_true,d_len_true+1,0);
hipMemcpy(d_true,true_string,d_len_true+1,hipMemcpyHostToDevice);
int d_as_true = custring_view::alloc_size(true_string,d_len_true);
int d_len_false = strlen(false_string);
char* d_false = nullptr;
RMM_ALLOC(&d_false,d_len_false+1,0);
hipMemcpy(d_false,false_string,d_len_false+1,hipMemcpyHostToDevice);
int d_as_false = custring_view::alloc_size(false_string,d_len_false);
bool* d_values = (bool*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(bool),0);
hipMemcpy(d_values,values,count*sizeof(bool),hipMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),hipMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_as_true, d_as_false, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
bool value = d_values[idx];
int size = value ? d_as_true : d_as_false;
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings of booleans
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_true, d_len_true, d_false, d_len_false, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr; // null string
return;
}
char* buf = d_buffer + d_offsets[idx];
bool value = d_values[idx];
if( value )
d_strings[idx] = custring_view::create_from(buf,d_true,d_len_true);
else
d_strings[idx] = custring_view::create_from(buf,d_false,d_len_false);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
RMM_FREE(d_true,0);
RMM_FREE(d_false,0);
return rtn;
}
| 08fe5b31b2d1d970b3a1f341eaa97e5e3350bd67.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <math.h> // for isnan, isinf; cmath does not work here
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
//
int NVStrings::hash(unsigned int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->hash();
else
d_rtn[idx] = 0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stoi(int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stoi();
else
d_rtn[idx] = 0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stol(long* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
long* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(long),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stol();
else
d_rtn[idx] = 0L;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(long)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stof(float* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
float* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(float),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stof();
else
d_rtn[idx] = (float)0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(float)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::stod(double* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
double* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(double),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->stod();
else
d_rtn[idx] = 0.0;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(double)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
//
int NVStrings::htoi(unsigned int* results, bool todevice)
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr || dstr->empty() )
{
d_rtn[idx] = 0;
return;
}
long result = 0, base = 1;
const char* str = dstr->data();
int len = dstr->size()-1;
for( int i=len; i >= 0; --i )
{
char ch = str[i];
if( ch >= '0' && ch <= '9' )
{
result += (long)(ch-48) * base;
base *= 16;
}
else if( ch >= 'A' && ch <= 'Z' )
{
result += (long)(ch-55) * base;
base *= 16;
}
else if( ch >= 'a' && ch <= 'z' )
{
result += (long)(ch-87) * base;
base *= 16;
}
}
d_rtn[idx] = (unsigned int)result;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !todevice )
{
cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
// build strings from given integers
NVStrings* NVStrings::itos(const int* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::itos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
int* d_values = (int*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(int),0);
cudaMemcpy(d_values,values,count*sizeof(int),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
int value = d_values[idx];
int size = custring_view::ltos_size(value);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
int value = d_values[idx];
d_strings[idx] = custring_view::ltos(value,str);
});
//
if( !bdevmem )
RMM_FREE(d_values,0);
return rtn;
}
NVStrings* NVStrings::ltos(const long* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::ltos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
long* d_values = (long*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(long),0);
cudaMemcpy(d_values,values,count*sizeof(long),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
long value = d_values[idx];
int size = custring_view::ltos_size(value);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
long value = d_values[idx];
d_strings[idx] = custring_view::ltos(value,str);
});
//
if( !bdevmem )
{
RMM_FREE(d_values,0);
if( d_nulls ) // also free the device copy of the null bitmask allocated above
RMM_FREE(d_nulls,0);
}
return rtn;
}
struct ftos_converter
{
// significant digits is independent of scientific notation range
// digits more than this may require using long values instead of ints
const unsigned int significant_digits = 10;
// maximum power-of-10 that will fit in 32-bits
const unsigned int nine_digits = 1000000000; // 1x10^9
// Range of numbers here is for normalizing the value.
// If the value is above or below the following limits, the output is converted to
// scientific notation in order to show (at most) the number of significant digits.
const double upper_limit = 1000000000; // max is 1x10^9
const double lower_limit = 0.0001; // printf uses scientific notation below this
// Tables for doing normalization: converting to exponent form
// IEEE double has a maximum base-10 exponent of about 308 so these should cover everything
const double upper10[9] = { 10, 100, 10000, 1e8, 1e16, 1e32, 1e64, 1e128, 1e256 };
const double lower10[9] = { .1, .01, .0001, 1e-8, 1e-16, 1e-32, 1e-64, 1e-128, 1e-256 };
const double blower10[9] = { 1.0, .1, .001, 1e-7, 1e-15, 1e-31, 1e-63, 1e-127, 1e-255 };
// utility for quickly converting known integer range to character array
__device__ char* int2str( int value, char* output )
{
if( value==0 )
{
*output++ = '0';
return output;
}
char buffer[10]; // should be big-enough for 10 significant digits
char* ptr = buffer;
while( value > 0 )
{
*ptr++ = (char)('0' + (value % 10));
value /= 10;
}
while( ptr != buffer )
*output++ = *--ptr; // 54321 -> 12345
return output;
}
//
// dissect value into parts
// return decimal_places
__device__ int dissect_value( double value, unsigned int& integer, unsigned int& decimal, int& exp10 )
{
// dissect float into parts
int decimal_places = significant_digits-1;
// normalize step puts value between lower-limit and upper-limit
// by adjusting the exponent up or down
exp10 = 0;
if( value > upper_limit )
{
int fx = 256;
for( int idx=8; idx >= 0; --idx )
{
if( value >= upper10[idx] )
{
value *= lower10[idx];
exp10 += fx;
}
fx = fx >> 1;
}
}
else if( (value > 0.0) && (value < lower_limit) )
{
int fx = 256;
for( int idx=8; idx >= 0; --idx )
{
if( value < blower10[idx] )
{
value *= upper10[idx];
exp10 -= fx;
}
fx = fx >> 1;
}
}
//
unsigned int max_digits = nine_digits;
integer = (unsigned int)value;
for( unsigned int i=integer; i >= 10; i/=10 )
{
--decimal_places;
max_digits /= 10;
}
double remainder = (value - (double)integer) * (double)max_digits;
//printf("remainder=%g,value=%g,integer=%u,sd=%u\n",remainder,value,integer,max_digits);
decimal = (unsigned int)remainder;
remainder -= (double)decimal;
//printf("remainder=%g,decimal=%u\n",remainder,decimal);
decimal += (unsigned int)(2.0*remainder);
if( decimal >= max_digits )
{
decimal = 0;
++integer;
if( exp10 && (integer >= 10) )
{
++exp10;
integer = 1;
}
}
//
while( (decimal % 10)==0 && (decimal_places > 0) )
{
decimal /= 10;
--decimal_places;
}
return decimal_places;
}
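// Hand-traced example of dissect_value (illustrative): 1.5e12 exceeds upper_limit and is
// normalized to 1.5 with exp10=12; then integer=1, decimal=500000000 which trims to 5,
// and decimal_places=1, so float_to_string below renders "1.5e+12".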
//
// Converts value to string into output
// Output need not be more than significant_digits+7
// 7 = 1 sign, 1 decimal point, 1 exponent ('e'), 1 exponent-sign, 3 digits for exponent
//
__device__ int float_to_string( double value, char* output )
{
// check for valid value
if( isnan(value) )
{
memcpy(output,"NaN",3);
return 3;
}
bool bneg = false;
if( value < 0.0 )
{
value = -value;
bneg = true;
}
if( isinf(value) )
{
if( bneg )
memcpy(output,"-Inf",4);
else
memcpy(output,"Inf",3);
return bneg ? 4 : 3;
}
// dissect value into components
unsigned int integer = 0, decimal = 0;
int exp10 = 0;
int decimal_places = dissect_value(value,integer,decimal,exp10);
//
// now build the string from the
// components: sign, integer, decimal, exp10, decimal_places
//
// sign
char* ptr = output;
if( bneg )
*ptr++ = '-';
// integer
ptr = int2str(integer,ptr);
// decimal
if( decimal_places )
{
*ptr++ = '.';
char buffer[10];
char* pb = buffer;
while( decimal_places-- )
{
*pb++ = (char)('0' + (decimal % 10));
decimal /= 10;
}
while( pb != buffer ) // reverses the digits
*ptr++ = *--pb; // e.g. 54321 -> 12345
}
// exponent
if( exp10 )
{
*ptr++ = 'e';
if( exp10 < 0 )
{
*ptr++ ='-';
exp10 = -exp10;
}
else
*ptr++ ='+';
if( exp10 < 10 )
*ptr++ = '0'; // extra zero-pad
ptr = int2str(exp10,ptr);
}
// done
//*ptr = 0; // null-terminator
return (int)(ptr-output);
}
// need to compute how much memory is needed to
// hold the output string (not including null)
__device__ int compute_ftos_size( double value )
{
if( isnan(value) )
return 3; // NaN
bool bneg = false;
if( value < 0.0 )
{
value = -value;
bneg = true;
}
if( isinf(value) )
return 3 + (int)bneg; // Inf
// dissect float into parts
unsigned int integer = 0, decimal = 0;
int exp10 = 0;
int decimal_places = dissect_value(value,integer,decimal,exp10);
// now count up the components
// sign
int count = (int)bneg;
// integer
count += (int)(integer==0);
while( integer > 0 )
{
integer /= 10;
++count;
} // log10(integer)
// decimal
if( decimal_places )
{
++count; // decimal point
count += decimal_places;
}
// exponent
if( exp10 )
{
count += 2; // 'e±'
if( exp10 < 0 )
exp10 = -exp10;
count += (int)(exp10<10); // padding
while( exp10 > 0 )
{
exp10 /= 10;
++count;
} // log10(exp10)
}
return count;
}
};
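// Minimal usage sketch (not part of this file's API; guarded out so it never affects the
// build). The kernel name and the assumption that 'out' holds at least significant_digits+7
// bytes are for illustration only; float_to_string() returns the length and writes no
// null-terminator.
#if 0
__global__ void example_ftos_kernel( double value, char* out, int* out_len )
{
    ftos_converter fts;
    *out_len = fts.float_to_string(value,out); // e.g. 1.5e12 -> "1.5e+12" (7 characters)
}
#endif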
// build strings from given floats
NVStrings* NVStrings::ftos(const float* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::ftos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
float* d_values = (float*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(float),0);
cudaMemcpy(d_values,values,count*sizeof(float),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
float value = d_values[idx];
ftos_converter fts;
int bytes = fts.compute_ftos_size((double)value);
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from the float values
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
float value = d_values[idx];
ftos_converter fts;
int len = fts.float_to_string((double)value,str);
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
{
RMM_FREE(d_values,0);
if( d_nulls ) // also free the device copy of the null bitmask allocated above
RMM_FREE(d_nulls,0);
}
return rtn;
}
// build strings from given doubles
NVStrings* NVStrings::dtos(const double* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::dtos values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
double* d_values = (double*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(double),0);
cudaMemcpy(d_values,values,count*sizeof(double),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
double value = d_values[idx];
ftos_converter fts;
int bytes = fts.compute_ftos_size(value);
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from the double values
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr;
return;
}
char* str = d_buffer + d_offsets[idx];
double value = d_values[idx];
ftos_converter fts;
int len = fts.float_to_string(value,str);
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
{
RMM_FREE(d_values,0);
if( d_nulls ) // also free the device copy of the null bitmask allocated above
RMM_FREE(d_nulls,0);
}
return rtn;
}
// convert IPv4 to integer
int NVStrings::ip2int( unsigned int* results, bool bdevmem )
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr || dstr->empty() )
{
d_rtn[idx] = 0;
return; // empty or null string
}
int tokens = dstr->split_size(".",1,0,-1);
if( tokens != 4 )
{
d_rtn[idx] = 0;
return; // invalid format
}
unsigned int vals[4] = {0,0,0,0}; // accumulators for the four octets; the first must start at zero
unsigned int* pval = vals;
const char* str = dstr->data();
int len = dstr->size();
for( int i=0; i < len; ++i )
{
char ch = str[i];
if( ch >= '0' && ch <= '9' )
{
*pval *= 10;
*pval += (unsigned int)(ch-'0');
}
else if( ch=='.' )
{
++pval;
*pval = 0;
}
}
unsigned int result = (vals[0] * 16777216) + (vals[1] * 65536) + (vals[2] * 256) + vals[3]; // 256^3, 256^2, 256^1, 256^0
d_rtn[idx] = result;
});
//
int zeros = thrust::count(execpol->on(0),d_rtn,d_rtn+count,0);
if( !bdevmem )
{
cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return (int)count-zeros;
}
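// Worked example for ip2int (illustrative): "192.168.1.1" splits into {192,168,1,1} and
// packs to 192*16777216 + 168*65536 + 1*256 + 1 = 3232235777 (0xC0A80101).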
NVStrings* NVStrings::int2ip( const unsigned int* values, unsigned int count, const unsigned char* nullbitmask, bool bdevmem )
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::int2ip values or count invalid");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
unsigned int* d_values = (unsigned int*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(unsigned int),0);
cudaMemcpy(d_values,values,count*sizeof(unsigned int),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
unsigned int ipnum = d_values[idx];
int bytes = 3; // 3 dots: xxx.xxx.xxx.xxx
for( int j=0; j < 4; ++j )
{
unsigned int value = (ipnum & 255)+1; // don't want log(0)
bytes += (int)log10((double)value)+1; // number of base10 digits
ipnum = ipnum >> 8;
}
int size = custring_view::alloc_size(bytes,bytes);
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings from integers
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_strings[idx] = nullptr;
return;
}
unsigned int ipnum = d_values[idx];
char* str = d_buffer + d_offsets[idx];
char* ptr = str;
for( int j=0; j < 4; ++j )
{
int value = ipnum & 255;
do {
char ch = '0' + (value % 10);
*ptr++ = ch;
value = value/10;
} while( value > 0 );
if( j < 3 )
*ptr++ = '.';
ipnum = ipnum >> 8;
}
int len = (int)(ptr-str);
for( int j=0; j<(len/2); ++j )
{
char ch1 = str[j];
char ch2 = str[len-j-1];
str[j] = ch2;
str[len-j-1] = ch1;
}
d_strings[idx] = custring_view::create_from(str,str,len);
});
//
if( !bdevmem )
{
RMM_FREE(d_values,0);
if( d_nulls ) // also free the device copy of the null bitmask allocated above
RMM_FREE(d_nulls,0);
}
return rtn;
}
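// Worked example for int2ip (illustrative): 3232235777 (0xC0A80101) emits its octets
// low-to-high as "1.1.861.291"; the in-place reversal above then yields "192.168.1.1".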
int NVStrings::to_bools( bool* results, const char* true_string, bool bdevmem )
{
unsigned int count = size();
if( count==0 || results==0 )
return -1;
auto execpol = rmm::exec_policy(0);
// copy parameter to device memory
char* d_true = nullptr;
int d_len = 0;
if( true_string )
{
d_len = (int)strlen(true_string);
RMM_ALLOC(&d_true,d_len+1,0);
cudaMemcpy(d_true,true_string,d_len+1,cudaMemcpyHostToDevice);
}
//
bool* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(bool),0);
// set the values
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_true, d_len, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->compare(d_true,d_len)==0;
else
d_rtn[idx] = (d_true==0); // a null string counts as true only when true_string is also null
});
//
// calculate the number of falses (to include nulls too)
int falses = thrust::count(execpol->on(0),d_rtn,d_rtn+count,false);
if( !bdevmem )
{
cudaMemcpy(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
if( d_true )
RMM_FREE(d_true,0);
return (int)count-falses;
}
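// Behavior sketch for to_bools (illustrative, assuming true_string="true"): entries equal to
// "true" yield true and any other string yields false; a null entry yields true only when
// true_string itself is null. The return value is the number of true results.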
NVStrings* NVStrings::create_from_bools(const bool* values, unsigned int count, const char* true_string, const char* false_string, const unsigned char* nullbitmask, bool bdevmem)
{
if( values==0 || count==0 )
throw std::invalid_argument("nvstrings::create_from_bools values or count invalid");
if( true_string==0 || false_string==0 )
throw std::invalid_argument("nvstrings::create_from_bools false and true strings must not be null");
auto execpol = rmm::exec_policy(0);
NVStrings* rtn = new NVStrings(count);
int d_len_true = strlen(true_string);
char* d_true = nullptr;
RMM_ALLOC(&d_true,d_len_true+1,0);
cudaMemcpy(d_true,true_string,d_len_true+1,cudaMemcpyHostToDevice);
int d_as_true = custring_view::alloc_size(true_string,d_len_true);
int d_len_false = strlen(false_string);
char* d_false = nullptr;
RMM_ALLOC(&d_false,d_len_false+1,0);
cudaMemcpy(d_false,false_string,d_len_false+1,cudaMemcpyHostToDevice);
int d_as_false = custring_view::alloc_size(false_string,d_len_false);
bool* d_values = (bool*)values;
unsigned char* d_nulls = (unsigned char*)nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_values,count*sizeof(bool),0);
cudaMemcpy(d_values,values,count*sizeof(bool),cudaMemcpyHostToDevice);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemcpy(d_nulls,nullbitmask,((count+7)/8)*sizeof(unsigned char),cudaMemcpyHostToDevice);
}
}
// compute size of memory we'll need
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_values, d_nulls, d_as_true, d_as_false, d_sizes] __device__ (unsigned int idx) {
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) )
{
d_sizes[idx] = 0;
return;
}
bool value = d_values[idx];
int size = value ? d_as_true : d_as_false;
d_sizes[idx] = ALIGN_SIZE(size);
});
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// build strings of booleans
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
custring_view_array d_strings = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_buffer, d_offsets, d_nulls, d_values, d_true, d_len_true, d_false, d_len_false, d_strings] __device__(unsigned int idx){
if( d_nulls && ((d_nulls[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
{
d_strings[idx] = nullptr; // null string
return;
}
char* buf = d_buffer + d_offsets[idx];
bool value = d_values[idx];
if( value )
d_strings[idx] = custring_view::create_from(buf,d_true,d_len_true);
else
d_strings[idx] = custring_view::create_from(buf,d_false,d_len_false);
});
//
if( !bdevmem )
{
RMM_FREE(d_values,0);
if( d_nulls ) // also free the device copy of the null bitmask allocated above
RMM_FREE(d_nulls,0);
}
RMM_FREE(d_true,0);
RMM_FREE(d_false,0);
return rtn;
}
|
f99c73b50a0ab29e0a8b3e8585e6a37e3f33a26b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Wed Aug 14 12:16:37 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define NUM_THREADS 512
#else
#define NUM_THREADS 1024
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
#define BLK_K (NUM_THREADS / (BLK_M * BLK_N))
///////////////////////////////////////////////////////////////////////////////////////////////////
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce2( /*int n,*/ int j, int k, int i, float x[][ BLK_N +1][ BLK_K +1] )
{
__syncthreads();
/*
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); }
*/
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__
void magmablas_sgemm_reduce_kernel(int m, int n, int k,
float alpha,
const float * __restrict__ d_A, int lda,
const float * __restrict__ d_B, int ldb,
float beta,
float *d_C, int ldc)
{
const int i = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
const float *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda;
const float *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb;
float *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
__shared__ float sum[BLK_M][BLK_N+1][ BLK_K +1];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[threadIdx.y][threadIdx.z][i] = lsum;
sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[threadIdx.y][threadIdx.z][0];
}
}
}
//==============================================================================
extern "C" void
magmablas_sgemm_reduce(magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A'*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
===================================================================== */
dim3 blocks( (m+BLK_M-1)/BLK_M, (n+BLK_N-1)/BLK_N );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( magmablas_sgemm_reduce_kernel), dim3(blocks),dim3(threads), 0, magma_stream ,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
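// Illustrative call (a sketch under assumed column-major layouts, not taken from the MAGMA
// documentation): with d_A of size k x m (lda = k), d_B of size k x n (ldb = k) and d_C of
// size m x n (ldc = m), all in device memory:
// magmablas_sgemm_reduce( m, n, k, alpha, d_A, k, d_B, k, beta, d_C, m );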
//==============================================================================
| f99c73b50a0ab29e0a8b3e8585e6a37e3f33a26b.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Wed Aug 14 12:16:37 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define NUM_THREADS 512
#else
#define NUM_THREADS 1024
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
#define BLK_K (NUM_THREADS / (BLK_M * BLK_N))
///////////////////////////////////////////////////////////////////////////////////////////////////
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce2( /*int n,*/ int j, int k, int i, float x[][ BLK_N +1][ BLK_K +1] )
{
__syncthreads();
/*
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); }
*/
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__
void magmablas_sgemm_reduce_kernel(int m, int n, int k,
float alpha,
const float * __restrict__ d_A, int lda,
const float * __restrict__ d_B, int ldb,
float beta,
float *d_C, int ldc)
{
const int i = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
const float *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda;
const float *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb;
float *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
__shared__ float sum[BLK_M][BLK_N+1][ BLK_K +1];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[threadIdx.y][threadIdx.z][i] = lsum;
sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[threadIdx.y][threadIdx.z][0];
}
}
}
//==============================================================================
extern "C" void
magmablas_sgemm_reduce(magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A'*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
===================================================================== */
dim3 blocks( (m+BLK_M-1)/BLK_M, (n+BLK_N-1)/BLK_N );
dim3 threads( BLK_K, BLK_M, BLK_N );
magmablas_sgemm_reduce_kernel<<<blocks,threads, 0, magma_stream >>>(
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
//==============================================================================
|
24b5c277382c74f89757bee7725c666179a2ceb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <algorithm>
#include <typeinfo>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/device/device_histogram.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
// Dispatch types
enum Backend
{
CUB, // CUB method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
bool g_verbose_input = false;
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Dispatch to different DeviceHistogram entrypoints
//---------------------------------------------------------------------
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS, int BACKEND>
struct Dispatch;
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS>
struct Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, CUB>
{
/**
* Dispatch to CUB multi histogram-range entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static hipError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *(&d_levels)[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
d_levels,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB multi histogram-even entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static hipError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
hipStream_t stream,
bool debug_synchronous)
{
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
lower_level,
upper_level,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
template <>
struct Dispatch<1, 1, CUB>
{
/**
* Dispatch to CUB single histogram-range entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static hipError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT (&d_levels)[1], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramRange(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
d_levels[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB single histogram-even entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static hipError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramEven(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
lower_level[0],
upper_level[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceHistogram
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
__global__ void CnpDispatchKernel(
Int2Type<ALGORITHM> algorithm,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_out_histograms,
int num_samples,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
*d_cdp_error = Dispatch<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(algorithm, Int2Type<false>(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_out_histograms.array, num_samples, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/ **
* Dispatch to CDP kernel
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
hipError_t Dispatch(
Int2Type<ALGORITHM> algorithm,
Int2Type<true> use_cdp,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
CounterT *d_histograms[NUM_ACTIVE_CHANNELS],
int num_samples,
hipStream_t stream,
bool debug_synchronous)
{
// Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters)
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL];
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, ALGORITHM><<<1,1>>>(algorithm, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_histo_wrapper, num_samples, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));
// Copy out error
hipError_t retval;
CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));
return retval;
}
*/
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
// Searches for bin given a list of bin-boundary levels
template <typename LevelT>
struct SearchTransform
{
LevelT *levels; // Pointer to levels array
int num_levels; // Number of levels in array
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
int bin = int(std::upper_bound(levels, levels + num_levels, (LevelT) sample) - levels - 1);
if (bin < 0)
{
// Sample out of range
return num_levels;
}
return bin;
}
};
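// Illustrative binning example (assumed values): with levels = {0, 2, 4, 8} and num_levels = 4,
// a sample of 5 falls in bin 2 (the range [4,8)), while a sample of -1 makes upper_bound land
// before the first level and the functor reports num_levels (out of range).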
// Scales samples to evenly-spaced bins
template <typename LevelT>
struct ScaleTransform
{
int num_levels; // Number of levels in array
LevelT max; // Max sample level (exclusive)
LevelT min; // Min sample level (inclusive)
LevelT scale; // Bin scaling factor
void Init(
int num_levels_, // Number of levels in array
LevelT max_, // Max sample level (exclusive)
LevelT min_, // Min sample level (inclusive)
LevelT scale_) // Bin scaling factor
{
this->num_levels = num_levels_;
this->max = max_;
this->min = min_;
this->scale = scale_;
}
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
if ((sample < min) || (sample >= max))
{
// Sample out of range
return num_levels;
}
return (int) ((((LevelT) sample) - min) / scale);
}
};
// Scales samples to evenly-spaced bins
template <>
struct ScaleTransform<float>
{
int num_levels; // Number of levels in array
float max; // Max sample level (exclusive)
float min; // Min sample level (inclusive)
float scale; // Bin scaling factor
void Init(
int _num_levels, // Number of levels in array
float _max, // Max sample level (exclusive)
float _min, // Min sample level (inclusive)
float _scale) // Bin scaling factor
{
this->num_levels = _num_levels;
this->max = _max;
this->min = _min;
this->scale = 1.0f / _scale;
}
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
if ((sample < min) || (sample >= max))
{
// Sample out of range
return num_levels;
}
return (int) ((((float) sample) - min) * scale);
}
};
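// Illustrative scaling example (assumed values): after Init(5, 8.0f, 0.0f, 2.0f) this float
// specialization stores scale = 1/2, so a sample of 5.0f maps to (int)((5.0f - 0.0f) * 0.5f),
// i.e. bin 2; samples outside [0, 8) report num_levels (out of range).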
/**
* Generate sample
*/
template <typename T, typename LevelT>
void Sample(T &datum, LevelT max_level, int entropy_reduction)
{
unsigned int max = (unsigned int) -1;
unsigned int bits;
RandomBits(bits, entropy_reduction);
float fraction = (float(bits) / max);
datum = (T) (fraction * max_level);
}
/**
* Initialize histogram samples
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename LevelT,
typename SampleT,
typename OffsetT>
void InitializeSamples(
LevelT max_level,
int entropy_reduction,
SampleT *h_samples,
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
// Initialize samples
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Init sample value
Sample(h_samples[offset], max_level, entropy_reduction);
if (g_verbose_input)
{
if (channel > 0) printf(", ");
std::cout << CoutCast(h_samples[offset]);
}
}
}
}
}
/**
* Initialize histogram solutions
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename SampleIteratorT,
typename TransformOp,
typename OffsetT>
void InitializeBins(
SampleIteratorT h_samples,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
TransformOp transform_op[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
CounterT *h_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
// Init bins
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
for (int bin = 0; bin < num_levels[CHANNEL] - 1; ++bin)
{
h_histogram[CHANNEL][bin] = 0;
}
}
// Initialize samples
if (g_verbose_input) printf("Samples: \n");
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
if (g_verbose_input) printf("[");
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Update sample bin
int bin = transform_op[channel](h_samples[offset]);
if (g_verbose_input) printf(" (%d)", bin); fflush(stdout);
if ((bin >= 0) && (bin < num_levels[channel] - 1))
{
// valid bin
h_histogram[channel][bin]++;
}
}
if (g_verbose_input) printf("]");
}
if (g_verbose_input) printf("\n\n");
}
}
/**
* Test histogram-even
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT,
typename SampleIteratorT>
void TestEven(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
SampleIteratorT h_samples,
SampleIteratorT d_samples)
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramEven (%s) %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(IsPointer<SampleIteratorT>::VALUE) ? "pointer" : "iterator",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
std::cout << "\n\tChannel " << channel << ": " << num_levels[channel] - 1 << " bins [" << lower_level[channel] << ", " << upper_level[channel] << ")\n";
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quell gcc warnings (bug?)
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
ScaleTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
transform_op[channel].Init(
num_levels[channel],
upper_level[channel],
lower_level[channel],
static_cast<LevelT>(((upper_level[channel] - lower_level[channel]) / bins)));
}
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * (num_levels[channel] - 1)));
CubDebugExit(hipMemset(d_histogram[channel], 0, sizeof(CounterT) * (num_levels[channel] - 1)));
}
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
int canary_bytes = 256;
char canary_token = 8;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(hipMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
* Test histogram-even (native pointer input)
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEvenNative(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
// Allocate and initialize host sample data
typedef SampleT Foo; // rename type to quell gcc warnings (bug?)
SampleT* h_samples = new Foo[total_samples];
InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
SampleT* d_samples = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
CubDebugExit(hipMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, hipMemcpyHostToDevice));
TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
h_samples, d_samples);
// Cleanup
if (h_samples) delete[] h_samples;
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
}
/**
* Test histogram-even (iterator input)
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEvenIterator(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
SampleT sample = (SampleT) lower_level[0];
ConstantInputIterator<SampleT> sample_itr(sample);
TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
sample_itr, sample_itr);
}
/**
* Test histogram-range
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestRange(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
    LevelT* levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramRange %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
printf("Channel %d: %d bins [", channel, num_levels[channel] - 1);
std::cout << levels[channel][0];
for (int level = 1; level < num_levels[channel]; ++level)
std::cout << ", " << levels[channel][level];
printf("]\n");
}
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
SampleT* h_samples = new Foo[total_samples];
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
SearchTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
transform_op[channel].levels = levels[channel];
transform_op[channel].num_levels = num_levels[channel];
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
}
InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
SampleT* d_samples = NULL;
LevelT* d_levels[NUM_ACTIVE_CHANNELS];
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
CubDebugExit(hipMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, hipMemcpyHostToDevice));
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_levels[channel], sizeof(LevelT) * num_levels[channel]));
CubDebugExit(hipMemcpy(d_levels[channel], levels[channel], sizeof(LevelT) * num_levels[channel], hipMemcpyHostToDevice));
int bins = num_levels[channel] - 1;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * bins));
CubDebugExit(hipMemset(d_histogram[channel], 0, sizeof(CounterT) * bins));
}
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
int canary_bytes = 256;
char canary_token = 9;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(hipMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
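    // Layout of the guarded allocation above: [canary_bytes | temp_storage_bytes | canary_bytes],
    // every byte initialized to canary_token. The dispatches below are handed only the middle
    // region, so the CompareDeviceResults() checks that follow will catch any out-of-bounds
    // writes into either guard zone.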
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
// Cleanup
if (h_samples) delete[] h_samples;
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
if (d_levels[channel])
CubDebugExit(g_allocator.DeviceFree(d_levels[channel]));
}
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
* Test histogram-even
*/
template <
Backend BACKEND,
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEven(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
LevelT lower_level[NUM_ACTIVE_CHANNELS];
LevelT upper_level[NUM_ACTIVE_CHANNELS];
// Find smallest level increment
int max_bins = max_num_levels - 1;
LevelT min_level_increment = static_cast<LevelT>(max_level / max_bins);
// Set upper and lower levels for each channel
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int num_bins = num_levels[channel] - 1;
lower_level[channel] = static_cast<LevelT>((max_level - (num_bins * min_level_increment)) / 2);
upper_level[channel] = static_cast<LevelT>((max_level + (num_bins * min_level_increment)) / 2);
}
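    // Worked example of the centering above (assuming LevelT = int, max_level = 256 and
    // max_num_levels = 257): min_level_increment = 256 / 256 = 1, so a channel with
    // 129 levels (128 bins) gets lower_level = (256 - 128) / 2 = 64 and
    // upper_level = (256 + 128) / 2 = 192, i.e. its bins are centered within [0, 256).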
// Test pointer-based samples
TestEvenNative<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
// Test iterator-based samples (CUB-only)
TestEvenIterator<CUB, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
}
/**
* Test histogram-range
*/
template <
Backend BACKEND,
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestRange(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
// Find smallest level increment
int max_bins = max_num_levels - 1;
LevelT min_level_increment = max_level / max_bins;
LevelT* levels[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
levels[channel] = new LevelT[num_levels[channel]];
int num_bins = num_levels[channel] - 1;
LevelT lower_level = (max_level - (num_bins * min_level_increment)) / 2;
for (int level = 0; level < num_levels[channel]; ++level)
levels[channel][level] = lower_level + (level * min_level_increment);
}
TestRange<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, levels, num_row_pixels, num_rows, row_stride_bytes);
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
delete[] levels[channel];
}
/**
* Test different entrypoints
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
TestEven<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
TestRange<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
* Test different number of levels
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
LevelT max_level,
int max_num_levels)
{
int num_levels[NUM_ACTIVE_CHANNELS];
// Unnecessary testing
// // All the same level
// for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
// {
// num_levels[channel] = max_num_levels;
// }
// Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
// num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
// All different levels
num_levels[0] = max_num_levels;
for (int channel = 1; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
num_levels[channel] = (num_levels[channel - 1] / 2) + 1;
}
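    // For example, with max_num_levels = 257 the loop above yields 257 levels (256 bins)
    // for channel 0, 257/2 + 1 = 129 levels (128 bins) for channel 1, 65 levels (64 bins)
    // for channel 2, and so on, so each active channel exercises a different bin count.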
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
* Test different entropy-levels
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
LevelT max_level,
int max_num_levels)
{
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, 0, max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, -1, max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, 5, max_level, max_num_levels);
}
/**
* Test different row strides
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
LevelT max_level,
int max_num_levels)
{
OffsetT row_stride_bytes = num_row_pixels * NUM_CHANNELS * sizeof(SampleT);
// No padding
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, max_level, max_num_levels);
// 13 samples padding
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes + (13 * sizeof(SampleT)), max_level, max_num_levels);
}
/**
* Test different problem sizes
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
LevelT max_level,
int max_num_levels)
{
// 0 row/col images
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(1920), OffsetT(0), max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(0), OffsetT(0), max_level, max_num_levels);
// 1080 image
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(1920), OffsetT(1080), max_level, max_num_levels);
    // Sample different aspect ratios
for (OffsetT rows = 1; rows < 1000000; rows *= 1000)
{
for (OffsetT cols = 1; cols < (1000000 / rows); cols *= 1000)
{
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
cols, rows, max_level, max_num_levels);
}
}
// Randomly select linear problem size between 1:10,000,000
unsigned int max_int = (unsigned int) -1;
for (int i = 0; i < 4; ++i)
{
unsigned int num_items;
RandomBits(num_items);
num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int));
num_items = CUB_MAX(1, num_items);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(num_items), 1, max_level, max_num_levels);
}
}
/**
 * Test different channel interleavings (valid specialization)
*/
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT max_level,
int max_num_levels,
Int2Type<true> /*is_valid_tag*/)
{
Test<SampleT, 1, 1, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 4, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 3, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 4, 4, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
}
/**
 * Test different channel interleavings (invalid specialization)
*/
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT /*max_level*/,
int /*max_num_levels*/,
Int2Type<false> /*is_valid_tag*/)
{}
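// The Int2Type tag dispatches between the two TestChannels overloads at compile time:
// the <true> specialization runs the full test matrix, while this empty <false>
// specialization is selected when a configuration should be skipped; for example, main()
// instantiates the size_t-to-int down-conversion test with
// Int2Type<(sizeof(size_t) != sizeof(int))>, so it only runs on builds where the two
// types differ.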
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_row_pixels = -1;
int entropy_reduction = 0;
int num_rows = 1;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", num_row_pixels);
int row_stride_pixels = num_row_pixels;
args.GetCmdLineArgument("rows", num_rows);
args.GetCmdLineArgument("stride", row_stride_pixels);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("entropy", entropy_reduction);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<pixels per row>] "
"[--rows=<number of rows>] "
"[--stride=<row stride in pixels>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--entropy=<entropy-reduction factor (default 0)>]"
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
if (num_row_pixels < 0)
{
num_row_pixels = 1920 * 1080;
row_stride_pixels = num_row_pixels;
}
#if defined(CUB_TEST_MINIMAL)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#elif defined(CUB_TEST_BENCHMARK)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
        // HistogramEven: 4/4 multichannel unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[4] = {257, 257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 4, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
        // HistogramEven: 3/4 multichannel unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 256 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#if !defined(__ICC)
// Fails with ICC for unknown reasons, see #332.
{
// HistogramEven: 3/4 multichannel float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#endif
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: 3/4 channel, unsigned char, varied bins (256, 128, 64)
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 129, 65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestRange<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
{
// HistogramEven: double [0,1.0] 64 bins
typedef double SampleT;
typedef double LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 512 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {513};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
TestChannels <unsigned char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <signed char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(128, 128 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(8192, 8192 + 1, Int2Type<true>());
#if !defined(__ICC)
// Fails with ICC for unknown reasons, see #332.
TestChannels <float, int, float, int>(1.0, 256 + 1, Int2Type<true>());
#endif
// Test down-conversion of size_t offsets to int
TestChannels <unsigned char, int, int, long long>(256, 256 + 1, Int2Type<(sizeof(size_t) != sizeof(int))>());
}
#endif
return 0;
}
| 24b5c277382c74f89757bee7725c666179a2ceb0.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <algorithm>
#include <typeinfo>
#include <cub/util_allocator.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/device/device_histogram.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
// Dispatch types
enum Backend
{
CUB, // CUB method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
bool g_verbose_input = false;
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Dispatch to different DeviceHistogram entrypoints
//---------------------------------------------------------------------
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS, int BACKEND>
struct Dispatch;
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS>
struct Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, CUB>
{
/**
* Dispatch to CUB multi histogram-range entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *(&d_levels)[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
d_levels,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB multi histogram-even entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
lower_level,
upper_level,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
template <>
struct Dispatch<1, 1, CUB>
{
/**
* Dispatch to CUB single histogram-range entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT (&d_levels)[1], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramRange(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
d_levels[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB single histogram-even entrypoint
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramEven(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
lower_level[0],
upper_level[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
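// Taken together, the two Dispatch specializations above give the test templates a single
// Range()/Even() interface: the <1, 1> specialization forwards to the single-channel
// DeviceHistogram::HistogramRange / HistogramEven entry points, while the general
// specialization forwards to MultiHistogramRange / MultiHistogramEven. Both simply repeat
// the call timing_timing_iterations times, so the same code path serves the warmup and
// the timed runs.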
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceHistogram
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
__global__ void CnpDispatchKernel(
Int2Type<ALGORITHM> algorithm,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_out_histograms,
int num_samples,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
*d_cdp_error = Dispatch<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(algorithm, Int2Type<false>(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_out_histograms.array, num_samples, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/ **
* Dispatch to CDP kernel
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
cudaError_t Dispatch(
Int2Type<ALGORITHM> algorithm,
Int2Type<true> use_cdp,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
CounterT *d_histograms[NUM_ACTIVE_CHANNELS],
int num_samples,
cudaStream_t stream,
bool debug_synchronous)
{
// Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters)
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL];
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, ALGORITHM><<<1,1>>>(algorithm, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_histo_wrapper, num_samples, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));
// Copy out error
cudaError_t retval;
CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));
return retval;
}
*/
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
// Searches for bin given a list of bin-boundary levels
template <typename LevelT>
struct SearchTransform
{
LevelT *levels; // Pointer to levels array
int num_levels; // Number of levels in array
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
int bin = int(std::upper_bound(levels, levels + num_levels, (LevelT) sample) - levels - 1);
if (bin < 0)
{
// Sample out of range
return num_levels;
}
return bin;
}
};
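// Illustrative behavior of SearchTransform (a sketch with assumed values, not data used
// by the tests): with levels = {0, 2, 4, 6} and num_levels = 4, a sample of 3 maps to
// bin 1 (the range [2, 4)); a sample below levels[0] returns num_levels as an
// out-of-range sentinel; and a sample at or above the last boundary returns
// num_levels - 1, which InitializeBins() rejects because valid bins are 0 .. num_levels - 2.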
// Scales samples to evenly-spaced bins
template <typename LevelT>
struct ScaleTransform
{
int num_levels; // Number of levels in array
LevelT max; // Max sample level (exclusive)
LevelT min; // Min sample level (inclusive)
LevelT scale; // Bin scaling factor
void Init(
int num_levels_, // Number of levels in array
LevelT max_, // Max sample level (exclusive)
LevelT min_, // Min sample level (inclusive)
LevelT scale_) // Bin scaling factor
{
this->num_levels = num_levels_;
this->max = max_;
this->min = min_;
this->scale = scale_;
}
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
if ((sample < min) || (sample >= max))
{
// Sample out of range
return num_levels;
}
return (int) ((((LevelT) sample) - min) / scale);
}
};
// Scales samples to evenly-spaced bins
template <>
struct ScaleTransform<float>
{
int num_levels; // Number of levels in array
float max; // Max sample level (exclusive)
float min; // Min sample level (inclusive)
float scale; // Bin scaling factor
void Init(
int _num_levels, // Number of levels in array
float _max, // Max sample level (exclusive)
float _min, // Min sample level (inclusive)
float _scale) // Bin scaling factor
{
this->num_levels = _num_levels;
this->max = _max;
this->min = _min;
this->scale = 1.0f / _scale;
}
// Functor for converting samples to bin-ids (num_levels is returned if sample is out of range)
template <typename SampleT>
int operator()(SampleT sample)
{
if ((sample < min) || (sample >= max))
{
// Sample out of range
return num_levels;
}
return (int) ((((float) sample) - min) * scale);
}
};
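// Note on the float specialization above: Init() stores the reciprocal of the bin width so
// that operator() replaces the per-sample division of the generic ScaleTransform with a
// multiply. As an assumed example: min = 0.0f, max = 1.0f and 256 bins give a bin width of
// 1/256, a stored scale of 256.0f, and a sample of 0.5f lands in bin (0.5 - 0) * 256 = 128.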
/**
* Generate sample
*/
template <typename T, typename LevelT>
void Sample(T &datum, LevelT max_level, int entropy_reduction)
{
unsigned int max = (unsigned int) -1;
unsigned int bits;
RandomBits(bits, entropy_reduction);
float fraction = (float(bits) / max);
datum = (T) (fraction * max_level);
}
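// The entropy_reduction argument is forwarded to RandomBits() from test_util.h. Assuming
// the usual implementation of that helper, larger values AND together additional random
// words (skewing samples toward zero and the lowest bins), while -1 produces an all-ones
// bit pattern (samples pinned at max_level); this is how the entropy settings exercised by
// main() vary the sample distribution without changing this function.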
/**
* Initialize histogram samples
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename LevelT,
typename SampleT,
typename OffsetT>
void InitializeSamples(
LevelT max_level,
int entropy_reduction,
SampleT *h_samples,
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
// Initialize samples
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Init sample value
Sample(h_samples[offset], max_level, entropy_reduction);
if (g_verbose_input)
{
if (channel > 0) printf(", ");
std::cout << CoutCast(h_samples[offset]);
}
}
}
}
}
/**
* Initialize histogram solutions
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename SampleIteratorT,
typename TransformOp,
typename OffsetT>
void InitializeBins(
SampleIteratorT h_samples,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
    TransformOp transform_op[NUM_ACTIVE_CHANNELS], ///< [in] The per-channel functors (e.g., SearchTransform or ScaleTransform) for mapping samples to bin indices.
    CounterT *h_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>h_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
// Init bins
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
for (int bin = 0; bin < num_levels[CHANNEL] - 1; ++bin)
{
h_histogram[CHANNEL][bin] = 0;
}
}
// Initialize samples
if (g_verbose_input) printf("Samples: \n");
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
if (g_verbose_input) printf("[");
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Update sample bin
int bin = transform_op[channel](h_samples[offset]);
if (g_verbose_input) printf(" (%d)", bin); fflush(stdout);
if ((bin >= 0) && (bin < num_levels[channel] - 1))
{
// valid bin
h_histogram[channel][bin]++;
}
}
if (g_verbose_input) printf("]");
}
if (g_verbose_input) printf("\n\n");
}
}
/**
* Test histogram-even
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT,
typename SampleIteratorT>
void TestEven(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
SampleIteratorT h_samples,
SampleIteratorT d_samples)
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramEven (%s) %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(IsPointer<SampleIteratorT>::VALUE) ? "pointer" : "iterator",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
std::cout << "\n\tChannel " << channel << ": " << num_levels[channel] - 1 << " bins [" << lower_level[channel] << ", " << upper_level[channel] << ")\n";
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
ScaleTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
transform_op[channel].Init(
num_levels[channel],
upper_level[channel],
lower_level[channel],
static_cast<LevelT>(((upper_level[channel] - lower_level[channel]) / bins)));
}
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * (num_levels[channel] - 1)));
CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * (num_levels[channel] - 1)));
}
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
int canary_bytes = 256;
char canary_token = 8;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
* Test histogram-even (native pointer input)
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEvenNative(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
// Allocate and initialize host sample data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
SampleT* h_samples = new Foo[total_samples];
InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
SampleT* d_samples = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, cudaMemcpyHostToDevice));
TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
h_samples, d_samples);
// Cleanup
if (h_samples) delete[] h_samples;
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
}
/**
 * Test histogram-even (iterator input)
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEvenIterator(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
SampleT sample = (SampleT) lower_level[0];
ConstantInputIterator<SampleT> sample_itr(sample);
TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
sample_itr, sample_itr);
}
/**
* Test histogram-range
*/
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestRange(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
    LevelT* levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramRange %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
printf("Channel %d: %d bins [", channel, num_levels[channel] - 1);
std::cout << levels[channel][0];
for (int level = 1; level < num_levels[channel]; ++level)
std::cout << ", " << levels[channel][level];
printf("]\n");
}
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
SampleT* h_samples = new Foo[total_samples];
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
SearchTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
transform_op[channel].levels = levels[channel];
transform_op[channel].num_levels = num_levels[channel];
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
}
InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
SampleT* d_samples = NULL;
LevelT* d_levels[NUM_ACTIVE_CHANNELS];
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, cudaMemcpyHostToDevice));
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_levels[channel], sizeof(LevelT) * num_levels[channel]));
CubDebugExit(cudaMemcpy(d_levels[channel], levels[channel], sizeof(LevelT) * num_levels[channel], cudaMemcpyHostToDevice));
int bins = num_levels[channel] - 1;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * bins));
CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * bins));
}
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
int canary_bytes = 256;
char canary_token = 9;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
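        // total_samples / avg_millis is samples per millisecond; dividing by 1e6 converts it
        // to billions of samples per second, and scaling by the sample size gives logical GB/s.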
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
// Cleanup
if (h_samples) delete[] h_samples;
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
if (d_levels[channel])
CubDebugExit(g_allocator.DeviceFree(d_levels[channel]));
}
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
* Test histogram-even
*/
template <
Backend BACKEND,
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestEven(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
LevelT lower_level[NUM_ACTIVE_CHANNELS];
LevelT upper_level[NUM_ACTIVE_CHANNELS];
// Find smallest level increment
int max_bins = max_num_levels - 1;
LevelT min_level_increment = static_cast<LevelT>(max_level / max_bins);
// Set upper and lower levels for each channel
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int num_bins = num_levels[channel] - 1;
lower_level[channel] = static_cast<LevelT>((max_level - (num_bins * min_level_increment)) / 2);
upper_level[channel] = static_cast<LevelT>((max_level + (num_bins * min_level_increment)) / 2);
}
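    // e.g., with max_level = 256 and max_num_levels = 257, min_level_increment = 1; a channel
    // with num_levels = 129 (128 bins) then gets lower_level = (256 - 128) / 2 = 64 and
    // upper_level = (256 + 128) / 2 = 192, centering the bin range around max_level / 2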
// Test pointer-based samples
TestEvenNative<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
// Test iterator-based samples (CUB-only)
TestEvenIterator<CUB, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
}
/**
* Test histogram-range
*/
template <
Backend BACKEND,
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestRange(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
// Find smallest level increment
int max_bins = max_num_levels - 1;
LevelT min_level_increment = max_level / max_bins;
LevelT* levels[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
levels[channel] = new LevelT[num_levels[channel]];
int num_bins = num_levels[channel] - 1;
LevelT lower_level = (max_level - (num_bins * min_level_increment)) / 2;
for (int level = 0; level < num_levels[channel]; ++level)
levels[channel][level] = lower_level + (level * min_level_increment);
}
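    // e.g., with max_level = 256, max_num_levels = 257 (min_level_increment = 1) and
    // num_levels[channel] = 65, lower_level = (256 - 64) / 2 = 96 and the channel's levels
    // are 96, 97, ..., 160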
TestRange<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
max_level, entropy_reduction, num_levels, levels, num_row_pixels, num_rows, row_stride_bytes);
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
delete[] levels[channel];
}
/**
* Test different entrypoints
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
TestEven<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
TestRange<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
* Test different number of levels
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
LevelT max_level,
int max_num_levels)
{
int num_levels[NUM_ACTIVE_CHANNELS];
// Unnecessary testing
// // All the same level
// for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
// {
// num_levels[channel] = max_num_levels;
// }
// Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
// num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
// All different levels
num_levels[0] = max_num_levels;
for (int channel = 1; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
num_levels[channel] = (num_levels[channel - 1] / 2) + 1;
}
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
* Test different entropy-levels
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
LevelT max_level,
int max_num_levels)
{
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, 0, max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, -1, max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, 5, max_level, max_num_levels);
}
/**
* Test different row strides
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
LevelT max_level,
int max_num_levels)
{
OffsetT row_stride_bytes = num_row_pixels * NUM_CHANNELS * sizeof(SampleT);
// No padding
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, max_level, max_num_levels);
// 13 samples padding
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes + (13 * sizeof(SampleT)), max_level, max_num_levels);
}
/**
* Test different problem sizes
*/
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
LevelT max_level,
int max_num_levels)
{
// 0 row/col images
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(1920), OffsetT(0), max_level, max_num_levels);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(0), OffsetT(0), max_level, max_num_levels);
// 1080 image
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(1920), OffsetT(1080), max_level, max_num_levels);
// Sample different aspect ratios sizes
for (OffsetT rows = 1; rows < 1000000; rows *= 1000)
{
for (OffsetT cols = 1; cols < (1000000 / rows); cols *= 1000)
{
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
cols, rows, max_level, max_num_levels);
}
}
// Randomly select linear problem size between 1:10,000,000
unsigned int max_int = (unsigned int) -1;
for (int i = 0; i < 4; ++i)
{
unsigned int num_items;
RandomBits(num_items);
num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int));
num_items = CUB_MAX(1, num_items);
Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
OffsetT(num_items), 1, max_level, max_num_levels);
}
}
/**
 * Test different channel interleavings (valid specialization)
*/
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT max_level,
int max_num_levels,
Int2Type<true> /*is_valid_tag*/)
{
Test<SampleT, 1, 1, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 4, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 3, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
Test<SampleT, 4, 4, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
}
/**
 * Test different channel interleavings (invalid specialization)
*/
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT /*max_level*/,
int /*max_num_levels*/,
Int2Type<false> /*is_valid_tag*/)
{}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_row_pixels = -1;
int entropy_reduction = 0;
int num_rows = 1;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", num_row_pixels);
int row_stride_pixels = num_row_pixels;
args.GetCmdLineArgument("rows", num_rows);
args.GetCmdLineArgument("stride", row_stride_pixels);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("entropy", entropy_reduction);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<pixels per row>] "
"[--rows=<number of rows>] "
"[--stride=<row stride in pixels>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--entropy=<entropy-reduction factor (default 0)>]"
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
if (num_row_pixels < 0)
{
num_row_pixels = 1920 * 1080;
row_stride_pixels = num_row_pixels;
}
#if defined(CUB_TEST_MINIMAL)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#elif defined(CUB_TEST_BENCHMARK)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: 4/4 multichannel Unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[4] = {257, 257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 4, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: 3/4 multichannel Unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 256 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#if !defined(__ICC)
// Fails with ICC for unknown reasons, see #332.
{
// HistogramEven: 3/4 multichannel float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#endif
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: 3/4 channel, unsigned char, varied bins (256, 128, 64)
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 129, 65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestRange<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
{
// HistogramEven: double [0,1.0] 64 bins
typedef double SampleT;
typedef double LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 512 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {513};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
TestChannels <unsigned char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <signed char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(128, 128 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(8192, 8192 + 1, Int2Type<true>());
#if !defined(__ICC)
// Fails with ICC for unknown reasons, see #332.
TestChannels <float, int, float, int>(1.0, 256 + 1, Int2Type<true>());
#endif
// Test down-conversion of size_t offsets to int
TestChannels <unsigned char, int, int, long long>(256, 256 + 1, Int2Type<(sizeof(size_t) != sizeof(int))>());
}
#endif
return 0;
}
|
7322efec6d77113889cb90d67488da812a97e695.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
int n = 10;
double *h_a;
double *h_b;
double *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n*sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 100;
gridSize = (int)ceil((float)n/blockSize);
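    // Round the grid size up so that gridSize * blockSize >= n; with n = 10 and
    // blockSize = 100 this is a single block, and the id < n check in the kernel
    // masks off the surplus threads.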
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 7322efec6d77113889cb90d67488da812a97e695.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
int n = 10;
double *h_a;
double *h_b;
double *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n*sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 100;
gridSize = (int)ceil((float)n/blockSize);
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
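    // The following cudaMemcpy runs on the default stream, so it implicitly waits for the
    // kernel to finish before copying the result back to the host.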
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
3c2dfbd4122c778bd73c4a50ace58a93a6264a29.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#define DTYPE float
__global__ void kernel(float *a, float *x, float* buff,int Xblocks,int size)
{
int i=threadIdx.x+blockIdx.x*blockDim.x;
int j=threadIdx.y+blockIdx.y*blockDim.y;
if (i<size && j<size)
{
float addsc=a[i+j*size]*x[i];
atomicAdd(&buff[j],addsc);
}
}
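// Each thread handles one (i, j) entry and atomically accumulates a[j*size + i] * x[i]
// into buff[j]; with A stored row-major, buff therefore ends up holding the
// matrix-vector product A*x.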
void fillA(DTYPE *a, int size)
{
for (int i=0;i<size*size;i++)
a[i]=1.0;
}
// Fill x with values
void fillX(DTYPE *x, int size)
{
for (int i=0;i<size;i++){
x[i]=1;
// x[i]= (DTYPE)(i+1);
}
}
int main(int argc, char**argv)
{
int sx=32;
int sy=32;
int i=1;
bool standard=true;
if (argc>1)
{
standard=false;
sx=atoi(argv[1]);
if (argc>2){
sy=atoi(argv[2]);
if(argc>3){
i=atoi(argv[3]);
}
}
}
if(standard){
std::cout<<"Do experiment with standard settings"<<std::endl;
std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl;
}else
{
std::cout<<"Do experiment with individual settings"<<std::endl;
std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl;
}
/*if(sx*sy!=t){
std::cout<<"Sx*Sy has to be equal to threads per block"<<std::endl;
return -1;
}*/
int size=1024*i;
int xblocks=size/sx;
    // Allocate data arrays for the host
DTYPE *a_host, *buff_host, *x_host;
    // and for the device
DTYPE *a_dev, *buff_dev,*x_dev;
    // Events for timing
hipEvent_t start,end;
    // Timings:
float kernelA_time=0.0;
    // TODO: allocate host memory and fill A and x
a_host = (DTYPE*)malloc(size*size*sizeof(DTYPE));
x_host = (DTYPE*)malloc(size*sizeof(DTYPE));
buff_host=(DTYPE*)malloc(size*sizeof(DTYPE));
fillA(a_host,size);
fillX(x_host,size);
    // TODO: create CUDA events
    // TODO: allocate CUDA memory for all arrays (a_dev,x_dev,y_dev)
hipMalloc((void**)&a_dev,size*size*sizeof(DTYPE));
hipMalloc((void**)&x_dev,size*sizeof(DTYPE));
hipMalloc((void**)&buff_dev,size*sizeof(DTYPE));
    // TODO: host->device memcpy of A and x + timing
//hipMemcpy(a_dev,a_host,1*sizeof(DTYPE),hipMemcpyHostToDevice);
//hipMemcpy(x_dev,x_host,1*sizeof(DTYPE),hipMemcpyHostToDevice);
hipMemcpy(a_dev,a_host,size*size*sizeof(DTYPE),hipMemcpyHostToDevice);
hipMemcpy(x_dev,x_host,size*sizeof(DTYPE),hipMemcpyHostToDevice);
//hipMemcpy(buff_dev,buff_host,1*sizeof(DTYPE),hipMemcpyHostToDevice);
    // Configuration of the CUDA kernels
dim3 block(sx,sy);
dim3 grid(size/block.x,size/block.y);
    // cache configuration
if(argc>4){
        if(atoi(argv[4])==1){// L1 preferred
std::cout<<"16 kB shared, 48kB L1"<<std::endl;
hipFuncSetCacheConfig(kernel, hipFuncCachePreferL1);
} else if(atoi(argv[4])==2){
std::cout<<"48kB shared, 16kb L1"<<std::endl;
hipFuncSetCacheConfig(kernel, hipFuncCachePreferShared);
}else{
std::cout<<"32kB shared, 32kB L1"<<std::endl;
hipFuncSetCacheConfig(kernel, hipFuncCachePreferNone);
}
}
    // TODO: run kernelAx and measure the time
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start,0);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block), 0, 0, a_dev,x_dev,buff_dev,xblocks,size);//,y_dev,size);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&kernelA_time,start,end);
if(argc>5){
hipMemcpy(buff_host,buff_dev,size*sizeof(DTYPE),hipMemcpyDeviceToHost);
for(int lj=0;lj<10;lj++){
std::cout<<buff_host[lj]<<std::endl;
}
}
std::cout<<"Computation time: "<<kernelA_time<<std::endl;
float gflops=pow(10,-6)*size*size*2/kernelA_time;
std::cout<<"Computation Performance in GFLOPs: "<<gflops<<std::endl;
return 0;
}
| 3c2dfbd4122c778bd73c4a50ace58a93a6264a29.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#define DTYPE float
__global__ void kernel(float *a, float *x, float* buff,int Xblocks,int size)
{
int i=threadIdx.x+blockIdx.x*blockDim.x;
int j=threadIdx.y+blockIdx.y*blockDim.y;
if (i<size && j<size)
{
float addsc=a[i+j*size]*x[i];
atomicAdd(&buff[j],addsc);
}
}
void fillA(DTYPE *a, int size)
{
for (int i=0;i<size*size;i++)
a[i]=1.0;
}
// Fill x with values
void fillX(DTYPE *x, int size)
{
for (int i=0;i<size;i++){
x[i]=1;
// x[i]= (DTYPE)(i+1);
}
}
int main(int argc, char**argv)
{
int sx=32;
int sy=32;
int i=1;
bool standard=true;
if (argc>1)
{
standard=false;
sx=atoi(argv[1]);
if (argc>2){
sy=atoi(argv[2]);
if(argc>3){
i=atoi(argv[3]);
}
}
}
if(standard){
std::cout<<"Do experiment with standard settings"<<std::endl;
std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl;
}else
{
std::cout<<"Do experiment with individual settings"<<std::endl;
std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl;
}
/*if(sx*sy!=t){
std::cout<<"Sx*Sy has to be equal to threads per block"<<std::endl;
return -1;
}*/
int size=1024*i;
int xblocks=size/sx;
    // Allocate data arrays for the host
DTYPE *a_host, *buff_host, *x_host;
    // and for the device
DTYPE *a_dev, *buff_dev,*x_dev;
    // Events for timing
cudaEvent_t start,end;
    // Timings:
float kernelA_time=0.0;
    // TODO: allocate host memory and fill A and x
a_host = (DTYPE*)malloc(size*size*sizeof(DTYPE));
x_host = (DTYPE*)malloc(size*sizeof(DTYPE));
buff_host=(DTYPE*)malloc(size*sizeof(DTYPE));
fillA(a_host,size);
fillX(x_host,size);
    // TODO: create CUDA events
    // TODO: allocate CUDA memory for all arrays (a_dev,x_dev,y_dev)
cudaMalloc((void**)&a_dev,size*size*sizeof(DTYPE));
cudaMalloc((void**)&x_dev,size*sizeof(DTYPE));
cudaMalloc((void**)&buff_dev,size*sizeof(DTYPE));
    // TODO: host->device memcpy of A and x + timing
//cudaMemcpy(a_dev,a_host,1*sizeof(DTYPE),cudaMemcpyHostToDevice);
//cudaMemcpy(x_dev,x_host,1*sizeof(DTYPE),cudaMemcpyHostToDevice);
cudaMemcpy(a_dev,a_host,size*size*sizeof(DTYPE),cudaMemcpyHostToDevice);
cudaMemcpy(x_dev,x_host,size*sizeof(DTYPE),cudaMemcpyHostToDevice);
//cudaMemcpy(buff_dev,buff_host,1*sizeof(DTYPE),cudaMemcpyHostToDevice);
    // Configuration of the CUDA kernels
dim3 block(sx,sy);
dim3 grid(size/block.x,size/block.y);
    // cache configuration
if(argc>4){
        if(atoi(argv[4])==1){// L1 preferred
std::cout<<"16 kB shared, 48kB L1"<<std::endl;
cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferL1);
} else if(atoi(argv[4])==2){
std::cout<<"48kB shared, 16kb L1"<<std::endl;
cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferShared);
}else{
std::cout<<"32kB shared, 32kB L1"<<std::endl;
cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferNone);
}
}
    // TODO: run kernelAx and measure the time
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start,0);
kernel<<<grid,block>>>(a_dev,x_dev,buff_dev,xblocks,size);//,y_dev,size);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernelA_time,start,end);
if(argc>5){
cudaMemcpy(buff_host,buff_dev,size*sizeof(DTYPE),cudaMemcpyDeviceToHost);
for(int lj=0;lj<10;lj++){
std::cout<<buff_host[lj]<<std::endl;
}
}
std::cout<<"Computation time: "<<kernelA_time<<std::endl;
float gflops=pow(10,-6)*size*size*2/kernelA_time;
std::cout<<"Computation Performance in GFLOPs: "<<gflops<<std::endl;
return 0;
}
|
14df90d83ff554562d2e470f41258e8fbbc5c953.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsmagmaDoubleComplex.h"
#include <hip/hip_runtime.h> // for HIP_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
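// OPT_WARP_NNZ spreads a warp over the nonzeros of a column, OPT_WARP_RHS spreads it over
// the right-hand sides, and OPT_WARP_AUTO chooses between the two per column inside the
// executor kernel below.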
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
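// After this kernel d_graphInDegree[r] holds the number of nonzeros with row index r, i.e.
// the in-degree of unknown r in the dependency graph consumed by the solve phase.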
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaDoubleComplex_ptr d_b,
magmaDoubleComplex_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0);
const magmaDoubleComplex coef = one / d_cscVal[pos];
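    // 'pos' is the first entry of the column for forward substitution and the last entry for
    // backward substitution; assuming row indices are sorted within each column of the
    // triangular matrix, that entry is the diagonal, so coef = 1 / diagonal.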
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((double)nnz / (double)num_threads);
hipMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
hipLaunchKernelGGL(( sptrsv_syncfree_analyser), dim3(num_blocks), dim3(num_threads) , 0, 0,
drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
hipMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), hipMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
hipMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), hipMemcpyDeviceToDevice);
// clear d_x for atomic operations
hipMemset(dx, 0, sizeof(magmaDoubleComplex) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((double)m /
(double)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
hipLaunchKernelGGL(( sptrsm_syncfree_executor), dim3(num_blocks), dim3(num_threads) , 0, 0,
dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
| 14df90d83ff554562d2e470f41258e8fbbc5c953.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsmagmaDoubleComplex.h"
#include <cuda.h> // for CUDA_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaDoubleComplex_ptr d_b,
magmaDoubleComplex_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0);
const magmaDoubleComplex coef = one / d_cscVal[pos];
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((double)nnz / (double)num_threads);
cudaMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
sptrsv_syncfree_analyser<<< num_blocks, num_threads >>>
(drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
cudaMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), cudaMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
cudaMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), cudaMemcpyDeviceToDevice);
// clear d_x for atomic operations
cudaMemset(dx, 0, sizeof(magmaDoubleComplex) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((double)m /
(double)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
sptrsm_syncfree_executor<<< num_blocks, num_threads >>>
(dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
|
e286d0971a065c77fe5cda298f690e310f839fc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -1/target probability if target = 1.0, 0.0 otherwise
__global__ void backwardLogisticLossKernel (float *predictions, float *targets, float *result)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
result[globalId] = targets[globalId] * -(1.0/predictions[globalId]);
} | e286d0971a065c77fe5cda298f690e310f839fc1.cu | // -1/target probability if target = 1.0, 0.0 otherwise
__global__ void backwardLogisticLossKernel (float *predictions, float *targets, float *result)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
result[globalId] = targets[globalId] * -(1.0/predictions[globalId]);
} |
4e96cc4813c885b4e6a34efe21e3983f161999fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "gpuinflate.h"
namespace cudf {
namespace io {
// Not supporting streams longer than this (not what snappy is intended for)
#define SNAPPY_MAX_STREAM_SIZE 0x7fffffff
#define LOG2_BATCH_SIZE 5
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
#define LOG2_BATCH_COUNT 2
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define LOG2_PREFETCH_SIZE 9
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks
#define LOG_CYCLECOUNT 0
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
**/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
**/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data
uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer
};
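// q.buf and prefetch_wrpos are produced by the prefetcher warp and consumed by the
// symbol-decoding warp (which advances prefetch_rdpos); batch[]/batch_len[] in turn hand
// decoded LZ77 symbols from the decoding warp to the warp that writes the output.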
/**
* @brief snappy decompression state
**/
struct unsnap_state_s {
const uint8_t *base; ///< base ptr of compressed stream
const uint8_t *end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< bytes to uncompressed remaining
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
gpu_inflate_input_s in; ///< input parameters for current block
};
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t)
{
const uint8_t *base = s->base;
uint32_t end = (uint32_t)(s->end - base);
uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next a 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
SYNCWARP();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, PREFETCH_SIZE - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
NANOSLEEP(100);
}
}
blen = SHFL0(blen);
if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*
**/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
**/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
**/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
#define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)]
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t)
{
uint32_t cur = 0;
uint32_t end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s *b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
b = &s->q.batch[batch * BATCH_SIZE];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = SHFL0(cur);
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
v0 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 32);
v1 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 64);
v2 = BALLOT((b0 == 4) || (b0 & 2));
len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = BALLOT(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = SHFL(blen, batch_len - 1);
cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = BALLOT(clen & 1);
v1 = BALLOT((clen >> 1) & 1);
len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= BATCH_SIZE);
batch_add = __ffs(BALLOT(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = SHFL(blen, batch_add - 1);
cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < BATCH_SIZE - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < BATCH_SIZE) {
uint32_t blen, offset;
uint8_t b0 = READ_BYTE(cur);
if (b0 & 3) {
uint8_t b1 = READ_BYTE(cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (READ_BYTE(cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = READ_BYTE(cur + 1);
if (num_bytes > 1) {
blen |= READ_BYTE(cur + 2) << 8;
if (num_bytes > 2) {
blen |= READ_BYTE(cur + 3) << 16;
if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (BATCH_COUNT - 1);
}
}
batch_len = SHFL0(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); }
}
if (batch_len != BATCH_SIZE) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
*would result in out-of-bounds accesses)
**/
__device__ void snappy_process_symbols(unsnap_state_s *s, int t)
{
const uint8_t *literal_base = s->base;
uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice);
int batch = 0;
do {
volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); }
} else {
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs);
uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - SHFL(bofs - blen_t, it);
int32_t dist = SHFL(dist_t, it);
if (it < n) {
const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += SHFL(bofs, n - 1);
blen_t = SHFL(blen_t, (n + t) & 0x1f);
dist_t = SHFL(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = SHFL(blen_t, i);
int32_t dist = SHFL(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = SHFL(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
SYNCWARP();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
**/
extern "C" __global__ void __launch_bounds__(128)
unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs)
{
__shared__ __align__(16) unsnap_state_s state_g;
int t = threadIdx.x;
unsnap_state_s *s = &state_g;
int strm_id = blockIdx.x;
if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->in)[t] =
reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t];
__threadfence_block();
}
if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice);
const uint8_t *end = cur + s->in.srcSize;
s->error = 0;
#if LOG_CYCLECOUNT
s->tstart = clock();
#endif
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
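      // (LEB128-style varint: each byte carries 7 bits, least-significant group first, and
      // the high bit marks continuation; e.g. the bytes 0xAC 0x02 decode to 300.)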
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f);
}
__syncthreads();
}
if (!t) {
outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
outputs[strm_id].status = s->error;
#if LOG_CYCLECOUNT
outputs[strm_id].reserved = clock() - s->tstart;
#else
outputs[strm_id].reserved = 0;
#endif
}
}
hipError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs,
gpu_inflate_status_s *outputs,
int count,
hipStream_t stream)
{
uint32_t count32 = (count > 0) ? count : 0;
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count
hipLaunchKernelGGL(( unsnap_kernel), dim3(dim_grid), dim3(dim_block), 0, stream, inputs, outputs);
return hipSuccess;
}
} // namespace io
} // namespace cudf
| 4e96cc4813c885b4e6a34efe21e3983f161999fa.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "gpuinflate.h"
namespace cudf {
namespace io {
// Not supporting streams longer than this (not what snappy is intended for)
#define SNAPPY_MAX_STREAM_SIZE 0x7fffffff
#define LOG2_BATCH_SIZE 5
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
#define LOG2_BATCH_COUNT 2
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define LOG2_PREFETCH_SIZE 9
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks
#define LOG_CYCLECOUNT 0
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
**/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
**/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data
uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
**/
struct unsnap_state_s {
const uint8_t *base; ///< base ptr of compressed stream
const uint8_t *end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< bytes to uncompressed remaining
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
gpu_inflate_input_s in; ///< input parameters for current block
};
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t)
{
const uint8_t *base = s->base;
uint32_t end = (uint32_t)(s->end - base);
uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
  // Start by prefetching up to the next 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
SYNCWARP();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, PREFETCH_SIZE - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
NANOSLEEP(100);
}
}
blen = SHFL0(blen);
if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*
**/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
**/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
**/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
#define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)]
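// PREFETCH_SIZE is a power of two, so masking with (PREFETCH_SIZE - 1) wraps the position into
// the circular prefetch buffer (equivalent to pos % PREFETCH_SIZE, but cheaper).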
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t)
{
uint32_t cur = 0;
uint32_t end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s *b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
b = &s->q.batch[batch * BATCH_SIZE];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = SHFL0(cur);
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
v0 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 32);
v1 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 64);
v2 = BALLOT((b0 == 4) || (b0 & 2));
len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = BALLOT(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = SHFL(blen, batch_len - 1);
cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = BALLOT(clen & 1);
v1 = BALLOT((clen >> 1) & 1);
len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= BATCH_SIZE);
batch_add = __ffs(BALLOT(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = SHFL(blen, batch_add - 1);
cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < BATCH_SIZE - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < BATCH_SIZE) {
uint32_t blen, offset;
uint8_t b0 = READ_BYTE(cur);
if (b0 & 3) {
uint8_t b1 = READ_BYTE(cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (READ_BYTE(cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = READ_BYTE(cur + 1);
if (num_bytes > 1) {
blen |= READ_BYTE(cur + 2) << 8;
if (num_bytes > 2) {
blen |= READ_BYTE(cur + 3) << 16;
if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (BATCH_COUNT - 1);
}
}
batch_len = SHFL0(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); }
}
if (batch_len != BATCH_SIZE) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
*would result in out-of-bounds accesses)
**/
__device__ void snappy_process_symbols(unsnap_state_s *s, int t)
{
const uint8_t *literal_base = s->base;
uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice);
int batch = 0;
do {
volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); }
} else {
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
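    // (with such a short distance the copy's source overlaps bytes that other lanes would be
    // writing in the same parallel step, so those symbols are handled one at a time by the
    // per-symbol loop further down)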
if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs);
uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - SHFL(bofs - blen_t, it);
int32_t dist = SHFL(dist_t, it);
if (it < n) {
const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += SHFL(bofs, n - 1);
blen_t = SHFL(blen_t, (n + t) & 0x1f);
dist_t = SHFL(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = SHFL(blen_t, i);
int32_t dist = SHFL(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = SHFL(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
SYNCWARP();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
**/
extern "C" __global__ void __launch_bounds__(128)
unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs)
{
__shared__ __align__(16) unsnap_state_s state_g;
int t = threadIdx.x;
unsnap_state_s *s = &state_g;
int strm_id = blockIdx.x;
if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->in)[t] =
reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t];
__threadfence_block();
}
if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice);
const uint8_t *end = cur + s->in.srcSize;
s->error = 0;
#if LOG_CYCLECOUNT
s->tstart = clock();
#endif
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f);
}
__syncthreads();
}
if (!t) {
outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
outputs[strm_id].status = s->error;
#if LOG_CYCLECOUNT
outputs[strm_id].reserved = clock() - s->tstart;
#else
outputs[strm_id].reserved = 0;
#endif
}
}
cudaError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs,
gpu_inflate_status_s *outputs,
int count,
cudaStream_t stream)
{
uint32_t count32 = (count > 0) ? count : 0;
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count
unsnap_kernel<<<dim_grid, dim_block, 0, stream>>>(inputs, outputs);
return cudaSuccess;
}
} // namespace io
} // namespace cudf
|
d3e73ddad73afee07f46f4799ac767ab19860dd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void updatePointsByTime(Point* allPoints, Point* result, int numOfPoints, double time)
{
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < numOfPoints) {
result[index].x = allPoints[index].orgx + allPoints[index].vx * time;
result[index].y = allPoints[index].orgy + allPoints[index].vy * time;
result[index].z = allPoints[index].orgz + allPoints[index].vz * time;
}
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t updatePointByTime_CUDA(Point* allPoints, Point* result, int numOfPoints, double time)
{
Point *dev_points = 0;
Point *dev_result = 0;
hipError_t cudaStatus;
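	// Round the block count up so a final, partially filled block covers the remainder when
	// numOfPoints is not a multiple of MAX_BLOCK_SIZE_CUDA; this ceiling division is equivalent
	// to (numOfPoints + MAX_BLOCK_SIZE_CUDA - 1) / MAX_BLOCK_SIZE_CUDA.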
int numOfblock = numOfPoints / MAX_BLOCK_SIZE_CUDA;
if (numOfPoints%MAX_BLOCK_SIZE_CUDA != 0)
numOfblock = numOfPoints / MAX_BLOCK_SIZE_CUDA + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (one input, one output) .
cudaStatus = hipMalloc((void**)&dev_points, numOfPoints * sizeof(Point));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_result, numOfPoints * sizeof(Point));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_points, allPoints, numOfPoints * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_result, result, numOfPoints * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
	updatePointsByTime<<<numOfblock, MAX_BLOCK_SIZE_CUDA>>>(dev_points, dev_result, numOfPoints, time);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(result, dev_result, numOfPoints * sizeof(Point), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_result);
hipFree(dev_points);
return cudaStatus;
} | d3e73ddad73afee07f46f4799ac767ab19860dd5.cu | #include "kernel.h"
__global__ void updatePointsByTime(Point* allPoints, Point* result, int numOfPoints, double time)
{
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < numOfPoints) {
result[index].x = allPoints[index].orgx + allPoints[index].vx * time;
result[index].y = allPoints[index].orgy + allPoints[index].vy * time;
result[index].z = allPoints[index].orgz + allPoints[index].vz * time;
}
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t updatePointByTime_CUDA(Point* allPoints, Point* result, int numOfPoints, double time)
{
Point *dev_points = 0;
Point *dev_result = 0;
cudaError_t cudaStatus;
int numOfblock = numOfPoints / MAX_BLOCK_SIZE_CUDA;
if (numOfPoints%MAX_BLOCK_SIZE_CUDA != 0)
numOfblock = numOfPoints / MAX_BLOCK_SIZE_CUDA + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (one input, one output) .
cudaStatus = cudaMalloc((void**)&dev_points, numOfPoints * sizeof(Point));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_result, numOfPoints * sizeof(Point));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_points, allPoints, numOfPoints * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_result, result, numOfPoints * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
	updatePointsByTime<<<numOfblock, MAX_BLOCK_SIZE_CUDA>>>(dev_points, dev_result, numOfPoints, time);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(result, dev_result, numOfPoints * sizeof(Point), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_result);
cudaFree(dev_points);
return cudaStatus;
} |
617a2bfbf84efeccfac9d3ae5fee51adcff49ff2.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm75.h"
#include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm80.h"
#include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm75.h"
#include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm80.h"
int run() {
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
std::cout << "Running on SM80" << std::endl;
run_nonfused_gemm_f16_sm80();
run_fused_gemm_f16_sm80();
run_nonfused_gemm_s8_sm80();
run_fused_gemm_s8_sm80();
#elif defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
std::cout << "Running on SM75" << std::endl;
run_nonfused_gemm_f16();
run_fused_gemm_f16();
run_nonfused_gemm_s8();
run_fused_gemm_s8();
#endif
return 0;
}
int main() {
bool notSupported = false;
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Tensor Core operations used in this example must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!(props.major * 10 + props.minor >= 75)) {
std::cerr << "Tensor Ops used in this example must be run on a machine with compute capability at least 75."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
| 617a2bfbf84efeccfac9d3ae5fee51adcff49ff2.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm75.h"
#include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm80.h"
#include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm75.h"
#include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm80.h"
int run() {
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
std::cout << "Running on SM80" << std::endl;
run_nonfused_gemm_f16_sm80();
run_fused_gemm_f16_sm80();
run_nonfused_gemm_s8_sm80();
run_fused_gemm_s8_sm80();
#elif defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
std::cout << "Running on SM75" << std::endl;
run_nonfused_gemm_f16();
run_fused_gemm_f16();
run_nonfused_gemm_s8();
run_fused_gemm_s8();
#endif
return 0;
}
int main() {
bool notSupported = false;
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Tensor Core operations used in this example must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!(props.major * 10 + props.minor >= 75)) {
std::cerr << "Tensor Ops used in this example must be run on a machine with compute capability at least 75."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
|
0ee13b5026b93f7ccc2c55e567598ad1b6b7ddcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void convolution_2D_basic_kernel(float* in, float* mask, float* out, int mask_width, int width, int height) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < height && col < width) {
int curr_row, curr_col;
float val = 0.0;
int n_start_row = row - (mask_width / 2);
int n_start_col = col - (mask_width / 2);
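		// n_start_row/n_start_col is the top-left corner of the mask footprint centered on
		// (row, col); taps that fall outside the image are skipped below, which behaves like
		// zero padding, so the output has the same dimensions as the input.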
for (int i = 0; i < mask_width; i++) { // row
for (int j = 0; j < mask_width; j++) { // col
curr_row = n_start_row + i;
curr_col = n_start_col + j;
if (curr_row >= 0 && curr_row < height && curr_col >= 0 && curr_col < width) {
val += in[curr_row * width + curr_col] * mask[mask_width * i + j];
}
}
}
out[row * width + col] = val;
}
}
int main() {
int mask_width = 3, width = 7, height = 7;
int size = width * height * sizeof(float);
float host_in[49] = { 193, 245, 178, 215, 64, 234, 13,
70, 87, 228, 65, 157, 73, 135,
174, 149, 245, 208, 121, 193, 199,
167, 57, 140, 62, 90, 192, 239,
41, 192, 35, 237, 212, 97, 33,
30, 65, 38, 89, 149, 145, 145,
127, 129, 65, 50, 140, 19, 120 };
float host_mask[9] = { 1, 2, 1,
2, 3, 2,
1, 2, 1 };
float *host_out = (float *) malloc(size);
float *device_in, *device_out, *device_mask;
hipMalloc((void**)&device_in, size);
hipMalloc((void**)&device_out, size);
hipMalloc((void**)&device_mask, mask_width * mask_width * sizeof(float));
hipMemcpy(device_in, host_in, size, hipMemcpyHostToDevice);
hipMemcpy(device_mask, host_mask, mask_width * mask_width * sizeof(float), hipMemcpyHostToDevice);
dim3 threadsPerBlock(width, height);
dim3 blocksPerGrid(1);
	convolution_2D_basic_kernel<<<blocksPerGrid, threadsPerBlock>>>(device_in, device_mask, device_out, mask_width, width, height);
hipDeviceSynchronize();
hipMemcpy(host_out, device_out, size, hipMemcpyDeviceToHost);
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j++) {
printf("%.f\t", host_out[i * width + j]);
}
printf("\n");
}
printf("\n");
free(host_out);
hipFree(device_in);
hipFree(device_out);
hipFree(device_mask);
return 0;
}
| 0ee13b5026b93f7ccc2c55e567598ad1b6b7ddcf.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void convolution_2D_basic_kernel(float* in, float* mask, float* out, int mask_width, int width, int height) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < height && col < width) {
int curr_row, curr_col;
float val = 0.0;
int n_start_row = row - (mask_width / 2);
int n_start_col = col - (mask_width / 2);
for (int i = 0; i < mask_width; i++) { // row
for (int j = 0; j < mask_width; j++) { // col
curr_row = n_start_row + i;
curr_col = n_start_col + j;
if (curr_row >= 0 && curr_row < height && curr_col >= 0 && curr_col < width) {
val += in[curr_row * width + curr_col] * mask[mask_width * i + j];
}
}
}
out[row * width + col] = val;
}
}
int main() {
int mask_width = 3, width = 7, height = 7;
int size = width * height * sizeof(float);
float host_in[49] = { 193, 245, 178, 215, 64, 234, 13,
70, 87, 228, 65, 157, 73, 135,
174, 149, 245, 208, 121, 193, 199,
167, 57, 140, 62, 90, 192, 239,
41, 192, 35, 237, 212, 97, 33,
30, 65, 38, 89, 149, 145, 145,
127, 129, 65, 50, 140, 19, 120 };
float host_mask[9] = { 1, 2, 1,
2, 3, 2,
1, 2, 1 };
float *host_out = (float *) malloc(size);
float *device_in, *device_out, *device_mask;
cudaMalloc((void**)&device_in, size);
cudaMalloc((void**)&device_out, size);
cudaMalloc((void**)&device_mask, mask_width * mask_width * sizeof(float));
cudaMemcpy(device_in, host_in, size, cudaMemcpyHostToDevice);
cudaMemcpy(device_mask, host_mask, mask_width * mask_width * sizeof(float), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(width, height);
dim3 blocksPerGrid(1);
	convolution_2D_basic_kernel<<<blocksPerGrid, threadsPerBlock>>>(device_in, device_mask, device_out, mask_width, width, height);
cudaDeviceSynchronize();
cudaMemcpy(host_out, device_out, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j++) {
printf("%.f\t", host_out[i * width + j]);
}
printf("\n");
}
printf("\n");
free(host_out);
cudaFree(device_in);
cudaFree(device_out);
cudaFree(device_mask);
return 0;
}
|
f6d1dfeeaf514b17175d60fa2934f6bfaa2fb3d5.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<hip/hip_runtime_api.h>
#include<hipfft.h>
#include"comfft.h"
int fft2d(hipfftComplex *src_data, hipfftComplex *dst_data, int nx, int ny){
hipfftHandle plan;
hipfftComplex *dev_src, *dev_dst;
//Allocate memory on the GPU and copy over the src array
cudaSafeCall(hipMalloc((void**) &dev_src, sizeof(hipfftComplex)*nx*ny));
cudaSafeCall(hipMalloc((void**) &dev_dst, sizeof(hipfftComplex)*nx*ny));
cudaSafeCall(hipMemcpy(dev_src, src_data, sizeof(hipfftComplex)*nx*ny, hipMemcpyHostToDevice));
//Create a 2d fft plan
//cufft functions return cufftResults, which require different error handling
cufftSafeCall(hipfftPlan2d(&plan, nx, ny, HIPFFT_C2C));
cufftSafeCall(hipfftExecC2C(plan, dev_src, dev_dst, HIPFFT_FORWARD));
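	// Note: cuFFT/hipFFT transforms are unnormalized, so a forward transform followed by an
	// inverse one scales the data by nx*ny; callers must divide by nx*ny to recover the input.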
cudaSafeCall(hipMemcpy(dst_data, dev_dst, sizeof(hipfftComplex)*nx*ny, hipMemcpyDeviceToHost));
CudaCheckError();
//Free GPU memory
cufftSafeCall(hipfftDestroy(plan));
hipFree(dev_src);
hipFree(dev_dst);
return 0;
}
| f6d1dfeeaf514b17175d60fa2934f6bfaa2fb3d5.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime_api.h>
#include<cufft.h>
#include"comfft.h"
int fft2d(cufftComplex *src_data, cufftComplex *dst_data, int nx, int ny){
cufftHandle plan;
cufftComplex *dev_src, *dev_dst;
//Allocate memory on the GPU and copy over the src array
cudaSafeCall(cudaMalloc((void**) &dev_src, sizeof(cufftComplex)*nx*ny));
cudaSafeCall(cudaMalloc((void**) &dev_dst, sizeof(cufftComplex)*nx*ny));
cudaSafeCall(cudaMemcpy(dev_src, src_data, sizeof(cufftComplex)*nx*ny, cudaMemcpyHostToDevice));
//Create a 2d fft plan
//cufft functions return cufftResults, which require different error handling
cufftSafeCall(cufftPlan2d(&plan, nx, ny, CUFFT_C2C));
cufftSafeCall(cufftExecC2C(plan, dev_src, dev_dst, CUFFT_FORWARD));
cudaSafeCall(cudaMemcpy(dst_data, dev_dst, sizeof(cufftComplex)*nx*ny, cudaMemcpyDeviceToHost));
CudaCheckError();
//Free GPU memory
cufftSafeCall(cufftDestroy(plan));
cudaFree(dev_src);
cudaFree(dev_dst);
return 0;
}
|
9a6283994826bfcf35c5e6c78aab088cf0ff06f4.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
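// (Each thread reads its neighbors' velocities from vel1 while writing its own updated velocity
// to vel2, then the pointers are swapped after the step, so no thread ever reads a value that
// has already been overwritten for the next frame.)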
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
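// Once the boids are sorted by grid cell index, each cell's boids occupy one contiguous run of
// the sorted arrays; these two buffers record the bounds of that run so a neighbor search only
// has to walk the boids in nearby cells.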
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
#define testing27
#ifdef testing27
gridCellWidth = ::max(::max(rule1Distance, rule2Distance), rule3Distance);
#else
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
#endif
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos2 failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper that launches the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
// Rule 1: Boids try to fly towards the center of mass of neighboring boids.
glm::vec3 perceivedCenter(0.f);
int numberOfNeighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
numberOfNeighbors++;
}
}
glm::vec3 v1(0.f);
if (numberOfNeighbors > 0) {
perceivedCenter /= numberOfNeighbors;
v1 = (perceivedCenter - pos[iSelf]) * rule1Scale;
}
// Rule 2: Boids try to keep a small distance away from other objects.
glm::vec3 c(0.f);
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule2Distance) {
c -= pos[i] - pos[iSelf];
}
}
glm::vec3 v2 = c * rule2Scale;
// Rule 3: Boids try to match velocity with near boids.
glm::vec3 perceivedVelocity(0.f);
numberOfNeighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
numberOfNeighbors++;
}
}
glm::vec3 v3(0.f);
if (numberOfNeighbors > 0) {
perceivedVelocity /= numberOfNeighbors;
v3 = perceivedVelocity * rule3Scale;
}
return v1 + v2 + v3;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the three flocking rules applied to its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
// Because other threads will potentially access velocity of this boid,
// we cannot directly update vel1. Instead, we store the value in vel2.
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
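// (With the x + y*R + z*R^2 layout below, cells adjacent in x are adjacent in
// the 1D index, so looping z outermost and x innermost walks the cell indices,
// and the cell-sorted boid data, in increasing memory order.)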
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
glm::vec3 position = pos[index];
glm::vec3 offset = (position - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
int gi = gridIndex3Dto1D((int)floor.x, (int)floor.y, (int)floor.z,
gridResolution);
gridIndices[index] = gi;
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
else if (index == 0) {
int curCell = particleGridIndices[index];
gridCellStartIndices[curCell] = 0;
}
else {
int curCell = particleGridIndices[index];
int prevCell = particleGridIndices[index - 1];
if (curCell != prevCell) {
gridCellStartIndices[curCell] = index;
gridCellEndIndices[prevCell] = index;
}
if (index == N - 1) {
gridCellEndIndices[curCell] = N;
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
int boid = particleArrayIndices[index];
glm::vec3 position = pos[boid];
glm::vec3 offset = (position - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
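// With testing27 the cell width equals the largest rule distance, so the full
// 3x3x3 block of neighboring cells is scanned; otherwise cells are twice that
// wide and only the 2x2x2 block on the boid's side of its own cell is needed.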
#ifdef testing27
int sz = 3;
int3 start = make_int3((int)floor.x - 1, (int)floor.y - 1, (int)floor.z - 1);
int3 next = make_int3(1, 1, 1);
#else
int sz = 2;
int3 start = make_int3((int)floor.x, (int)floor.y, (int)floor.z);
int3 next = make_int3(
(offset.x > floor.x + 0.5f) ? 1 : -1,
(offset.y > floor.y + 0.5f) ? 1 : -1,
(offset.z > floor.z + 0.5f) ? 1 : -1
);
#endif
glm::vec3 v1(0.f), v2(0.f), v3(0.f); // zero-initialize: v1 and v3 are only assigned when neighbors are found
int numberOfNeighbors1 = 0;
int numberOfNeighbors3 = 0;
glm::vec3 perceivedCenter(0.f);
glm::vec3 c(0.f);
glm::vec3 perceivedVelocity(0.f);
for (int k = 0; k < sz; k++) {
for (int j = 0; j < sz; j++) {
for (int i = 0; i < sz; i++) {
int3 cur = make_int3(
start.x + i * next.x,
start.y + j * next.y,
start.z + k * next.z
);
if (0 <= cur.x && cur.x < gridResolution &&
0 <= cur.y && cur.y < gridResolution &&
0 <= cur.z && cur.z < gridResolution) {
int gi = gridIndex3Dto1D(cur.x, cur.y, cur.z, gridResolution);
for (int nIdx = gridCellStartIndices[gi];
nIdx < gridCellEndIndices[gi];
nIdx++) {
int neighbor = particleArrayIndices[nIdx];
if (boid != neighbor) {
float dist = glm::distance(pos[neighbor], pos[boid]);
// Rule 1
if (dist < rule1Distance) {
perceivedCenter += pos[neighbor];
numberOfNeighbors1++;
}
// Rule 2
if (dist < rule2Distance) {
c -= (pos[neighbor] - pos[boid]);
}
// Rule 3
if (dist < rule3Distance) {
perceivedVelocity += vel1[neighbor];
numberOfNeighbors3++;
}
}
}
}
}
}
}
// Continue Rule 1
if (numberOfNeighbors1 > 0) {
perceivedCenter /= numberOfNeighbors1;
v1 = (perceivedCenter - pos[boid]) * rule1Scale;
}
// Continue Rule 2
v2 = c * rule2Scale;
// Continue Rule 3
if (numberOfNeighbors3 > 0) {
perceivedVelocity /= numberOfNeighbors3;
v3 = perceivedVelocity * rule3Scale;
}
glm::vec3 newVel = vel1[boid] + v1 + v2 + v3;
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
vel2[boid] = newVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
glm::vec3 offset = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
#ifdef testing27
int sz = 3;
int3 start = make_int3((int)floor.x - 1, (int)floor.y - 1, (int)floor.z - 1);
int3 next = make_int3(1, 1, 1);
#else
int sz = 2;
int3 start = make_int3((int)floor.x, (int)floor.y, (int)floor.z);
int3 next = make_int3(
(offset.x > floor.x + 0.5f) ? 1 : -1,
(offset.y > floor.y + 0.5f) ? 1 : -1,
(offset.z > floor.z + 0.5f) ? 1 : -1
);
#endif
glm::vec3 v1(0.f), v2(0.f), v3(0.f); // zero-initialize: v1 and v3 are only assigned when neighbors are found
int numberOfNeighbors1 = 0;
int numberOfNeighbors3 = 0;
glm::vec3 perceivedCenter(0.f);
glm::vec3 c(0.f);
glm::vec3 perceivedVelocity(0.f);
for (int k = 0; k < sz; k++) {
for (int j = 0; j < sz; j++) {
for (int i = 0; i < sz; i++) {
int3 cur = make_int3(
start.x + i * next.x,
start.y + j * next.y,
start.z + k * next.z
);
if (0 <= cur.x && cur.x < gridResolution &&
0 <= cur.y && cur.y < gridResolution &&
0 <= cur.z && cur.z < gridResolution) {
int gi = gridIndex3Dto1D(cur.x, cur.y, cur.z, gridResolution);
for (int nIdx = gridCellStartIndices[gi];
nIdx < gridCellEndIndices[gi];
nIdx++) {
if (index != nIdx) {
float dist = glm::distance(pos[nIdx], pos[index]);
// Rule 1
if (dist < rule1Distance) {
perceivedCenter += pos[nIdx];
numberOfNeighbors1++;
}
// Rule 2
if (dist < rule2Distance) {
c -= (pos[nIdx] - pos[index]);
}
// Rule 3
if (dist < rule3Distance) {
perceivedVelocity += vel1[nIdx];
numberOfNeighbors3++;
}
}
}
}
}
}
}
// Continue Rule 1
if (numberOfNeighbors1 > 0) {
perceivedCenter /= numberOfNeighbors1;
v1 = (perceivedCenter - pos[index]) * rule1Scale;
}
// Continue Rule 2
v2 = c * rule2Scale;
// Continue Rule 3
if (numberOfNeighbors3 > 0) {
perceivedVelocity /= numberOfNeighbors3;
v3 = perceivedVelocity * rule3Scale;
}
glm::vec3 newVel = vel1[index] + v1 + v2 + v3;
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
vel2[index] = newVel;
}
__global__ void kernReshuffleParticle(
int N, int *particleArrayIndices,
glm::vec3* pos1, glm::vec3* pos2,
glm::vec3* vel1, glm::vec3* vel2) {
// Reshuffle the particle data
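// Gather pos/vel into cell-sorted order so the coherent kernel can index them
// directly with the sorted position, without a particleArrayIndices lookup.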
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
pos2[index] = pos1[particleArrayIndices[index]];
vel2[index] = vel1[particleArrayIndices[index]];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 blocks(numObjects / threadsPerBlock.x + 1);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(blocks), dim3(threadsPerBlock), 0, 0, numObjects, dev_pos,
dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(blocks), dim3(threadsPerBlock), 0, 0, numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
// Be careful of different array length. Use different block numbers.
dim3 boidBlocks(numObjects / threadsPerBlock.x + 1);
dim3 cellBlocks(gridCellCount / threadsPerBlock.x + 1);
kernComputeIndices << <boidBlocks, threadsPerBlock >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellStartIndices, - 1);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << <boidBlocks, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <boidBlocks, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationScatteredGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 boidBlocks(numObjects / threadsPerBlock.x + 1);
dim3 cellBlocks(gridCellCount / threadsPerBlock.x + 1);
kernComputeIndices << <boidBlocks, threadsPerBlock >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
kernReshuffleParticle << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleArrayIndices, dev_pos, dev_pos2, dev_vel1, dev_vel2);
std::swap(dev_pos, dev_pos2);
std::swap(dev_vel1, dev_vel2);
kernUpdateVelNeighborSearchCoherent << <boidBlocks, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <boidBlocks, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_pos2);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 9a6283994826bfcf35c5e6c78aab088cf0ff06f4.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
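// (Answer: each boid's new velocity is computed from its neighbors' velocities
// from the previous step, so updates read from vel1 and write to vel2, and the
// two pointers are swapped after every simulation step.)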
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
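// Integer bit-mixing hash (Jenkins/Wang style), used below to turn
// (index * time) into a well-scrambled seed for the per-thread RNG.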
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating a random starting position for each boid, scaled to the simulation space.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
#define testing27
#ifdef testing27
gridCellWidth = std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
#else
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
#endif
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper that launches the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
// Rule 1: Boids try to fly towards the center of mass of neighboring boids.
glm::vec3 perceivedCenter(0.f);
int numberOfNeighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
numberOfNeighbors++;
}
}
glm::vec3 v1(0.f);
if (numberOfNeighbors > 0) {
perceivedCenter /= numberOfNeighbors;
v1 = (perceivedCenter - pos[iSelf]) * rule1Scale;
}
// Rule 2: Boids try to keep a small distance away from other objects.
glm::vec3 c(0.f);
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule2Distance) {
c -= pos[i] - pos[iSelf];
}
}
glm::vec3 v2 = c * rule2Scale;
// Rule 3: Boids try to match velocity with near boids.
glm::vec3 perceivedVelocity(0.f);
numberOfNeighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(pos[iSelf], pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
numberOfNeighbors++;
}
}
glm::vec3 v3(0.f);
if (numberOfNeighbors > 0) {
perceivedVelocity /= numberOfNeighbors;
v3 = perceivedVelocity * rule3Scale;
}
return v1 + v2 + v3;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the three flocking rules applied to its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
// Because other threads will potentially access velocity of this boid,
// we cannot directly update vel1. Instead, we store the value in vel2.
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
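// (With the x + y*R + z*R^2 layout below, cells adjacent in x are adjacent in
// the 1D index, so looping z outermost and x innermost walks the cell indices,
// and the cell-sorted boid data, in increasing memory order.)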
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
glm::vec3 position = pos[index];
glm::vec3 offset = (position - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
int gi = gridIndex3Dto1D((int)floor.x, (int)floor.y, (int)floor.z,
gridResolution);
gridIndices[index] = gi;
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
else if (index == 0) {
int curCell = particleGridIndices[index];
gridCellStartIndices[curCell] = 0;
}
else {
int curCell = particleGridIndices[index];
int prevCell = particleGridIndices[index - 1];
if (curCell != prevCell) {
gridCellStartIndices[curCell] = index;
gridCellEndIndices[prevCell] = index;
}
if (index == N - 1) {
gridCellEndIndices[curCell] = N;
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
int boid = particleArrayIndices[index];
glm::vec3 position = pos[boid];
glm::vec3 offset = (position - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
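// With testing27 the cell width equals the largest rule distance, so the full
// 3x3x3 block of neighboring cells is scanned; otherwise cells are twice that
// wide and only the 2x2x2 block on the boid's side of its own cell is needed.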
#ifdef testing27
int sz = 3;
int3 start = make_int3((int)floor.x - 1, (int)floor.y - 1, (int)floor.z - 1);
int3 next = make_int3(1, 1, 1);
#else
int sz = 2;
int3 start = make_int3((int)floor.x, (int)floor.y, (int)floor.z);
int3 next = make_int3(
(offset.x > floor.x + 0.5f) ? 1 : -1,
(offset.y > floor.y + 0.5f) ? 1 : -1,
(offset.z > floor.z + 0.5f) ? 1 : -1
);
#endif
glm::vec3 v1(0.f), v2(0.f), v3(0.f); // zero-initialize: v1 and v3 are only assigned when neighbors are found
int numberOfNeighbors1 = 0;
int numberOfNeighbors3 = 0;
glm::vec3 perceivedCenter(0.f);
glm::vec3 c(0.f);
glm::vec3 perceivedVelocity(0.f);
for (int k = 0; k < sz; k++) {
for (int j = 0; j < sz; j++) {
for (int i = 0; i < sz; i++) {
int3 cur = make_int3(
start.x + i * next.x,
start.y + j * next.y,
start.z + k * next.z
);
if (0 <= cur.x && cur.x < gridResolution &&
0 <= cur.y && cur.y < gridResolution &&
0 <= cur.z && cur.z < gridResolution) {
int gi = gridIndex3Dto1D(cur.x, cur.y, cur.z, gridResolution);
for (int nIdx = gridCellStartIndices[gi];
nIdx < gridCellEndIndices[gi];
nIdx++) {
int neighbor = particleArrayIndices[nIdx];
if (boid != neighbor) {
float dist = glm::distance(pos[neighbor], pos[boid]);
// Rule 1
if (dist < rule1Distance) {
perceivedCenter += pos[neighbor];
numberOfNeighbors1++;
}
// Rule 2
if (dist < rule2Distance) {
c -= (pos[neighbor] - pos[boid]);
}
// Rule 3
if (dist < rule3Distance) {
perceivedVelocity += vel1[neighbor];
numberOfNeighbors3++;
}
}
}
}
}
}
}
// Continue Rule 1
if (numberOfNeighbors1 > 0) {
perceivedCenter /= numberOfNeighbors1;
v1 = (perceivedCenter - pos[boid]) * rule1Scale;
}
// Continue Rule 2
v2 = c * rule2Scale;
// Continue Rule 3
if (numberOfNeighbors3 > 0) {
perceivedVelocity /= numberOfNeighbors3;
v3 = perceivedVelocity * rule3Scale;
}
glm::vec3 newVel = vel1[boid] + v1 + v2 + v3;
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
vel2[boid] = newVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
glm::vec3 offset = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 floor = glm::floor(offset);
#ifdef testing27
int sz = 3;
int3 start = make_int3((int)floor.x - 1, (int)floor.y - 1, (int)floor.z - 1);
int3 next = make_int3(1, 1, 1);
#else
int sz = 2;
int3 start = make_int3((int)floor.x, (int)floor.y, (int)floor.z);
int3 next = make_int3(
(offset.x > floor.x + 0.5f) ? 1 : -1,
(offset.y > floor.y + 0.5f) ? 1 : -1,
(offset.z > floor.z + 0.5f) ? 1 : -1
);
#endif
glm::vec3 v1(0.f), v2(0.f), v3(0.f); // zero-initialize: v1 and v3 are only assigned when neighbors are found
int numberOfNeighbors1 = 0;
int numberOfNeighbors3 = 0;
glm::vec3 perceivedCenter(0.f);
glm::vec3 c(0.f);
glm::vec3 perceivedVelocity(0.f);
for (int k = 0; k < sz; k++) {
for (int j = 0; j < sz; j++) {
for (int i = 0; i < sz; i++) {
int3 cur = make_int3(
start.x + i * next.x,
start.y + j * next.y,
start.z + k * next.z
);
if (0 <= cur.x && cur.x < gridResolution &&
0 <= cur.y && cur.y < gridResolution &&
0 <= cur.z && cur.z < gridResolution) {
int gi = gridIndex3Dto1D(cur.x, cur.y, cur.z, gridResolution);
for (int nIdx = gridCellStartIndices[gi];
nIdx < gridCellEndIndices[gi];
nIdx++) {
if (index != nIdx) {
float dist = glm::distance(pos[nIdx], pos[index]);
// Rule 1
if (dist < rule1Distance) {
perceivedCenter += pos[nIdx];
numberOfNeighbors1++;
}
// Rule 2
if (dist < rule2Distance) {
c -= (pos[nIdx] - pos[index]);
}
// Rule 3
if (dist < rule3Distance) {
perceivedVelocity += vel1[nIdx];
numberOfNeighbors3++;
}
}
}
}
}
}
}
// Continue Rule 1
if (numberOfNeighbors1 > 0) {
perceivedCenter /= numberOfNeighbors1;
v1 = (perceivedCenter - pos[index]) * rule1Scale;
}
// Continue Rule 2
v2 = c * rule2Scale;
// Continue Rule 3
if (numberOfNeighbors3 > 0) {
perceivedVelocity /= numberOfNeighbors3;
v3 = perceivedVelocity * rule3Scale;
}
glm::vec3 newVel = vel1[index] + v1 + v2 + v3;
if (glm::length(newVel) > maxSpeed) {
newVel *= maxSpeed / glm::length(newVel);
}
vel2[index] = newVel;
}
__global__ void kernReshuffleParticle(
int N, int *particleArrayIndices,
glm::vec3* pos1, glm::vec3* pos2,
glm::vec3* vel1, glm::vec3* vel2) {
// Reshuffle the particle data
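// Gather pos/vel into cell-sorted order so the coherent kernel can index them
// directly with the sorted position, without a particleArrayIndices lookup.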
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N) {
return;
}
pos2[index] = pos1[particleArrayIndices[index]];
vel2[index] = vel1[particleArrayIndices[index]];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 blocks(numObjects / threadsPerBlock.x + 1);
kernUpdateVelocityBruteForce<<<blocks, threadsPerBlock>>>(numObjects, dev_pos,
dev_vel1, dev_vel2);
kernUpdatePos<<<blocks, threadsPerBlock>>>(numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
// Be careful of different array length. Use different block numbers.
dim3 boidBlocks(numObjects / threadsPerBlock.x + 1);
dim3 cellBlocks(gridCellCount / threadsPerBlock.x + 1);
kernComputeIndices << <boidBlocks, threadsPerBlock >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellStartIndices, - 1);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << <boidBlocks, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <boidBlocks, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationScatteredGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 boidBlocks(numObjects / threadsPerBlock.x + 1);
dim3 cellBlocks(gridCellCount / threadsPerBlock.x + 1);
kernComputeIndices << <boidBlocks, threadsPerBlock >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocks, threadsPerBlock >> > (gridCellCount,
dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
kernReshuffleParticle << <boidBlocks, threadsPerBlock >> > (numObjects,
dev_particleArrayIndices, dev_pos, dev_pos2, dev_vel1, dev_vel2);
std::swap(dev_pos, dev_pos2);
std::swap(dev_vel1, dev_vel2);
kernUpdateVelNeighborSearchCoherent << <boidBlocks, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <boidBlocks, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
std::swap(dev_vel1, dev_vel2);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_pos2);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
4cfa37dca268a48417dd8aca30cc6da729b859e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (C) 2016 Bruno Golosio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_error.h"
#include "nested_loop.h"
__device__ int *TestArray;
__global__ void InitTestArray(int *test_array)
{
TestArray = test_array;
}
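// Inner-loop body that every nested-loop implementation under test invokes:
// it accumulates iy*val into TestArray[ix]. The reference values computed in
// main() are Ny[ix]*(Ny[ix]-1), i.e. 2 * (0+1+...+Ny[ix]-1), which presumes
// the implementations call this with val = 2.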
__device__ void NestedLoopFunction(int ix, int iy, int val)
{
atomicAdd(&TestArray[ix], iy*val);
}
int SetNy(int Nx, int Ny_max, int *Ny, int k);
int main(int argc, char*argv[])
{
int Nx_max;
int Ny_max;
float k;
if (argc!=4) {
printf("Usage: %s Nx Ny_max k\n", argv[0]);
return 0;
}
sscanf(argv[1], "%d", &Nx_max);
sscanf(argv[2], "%d", &Ny_max);
sscanf(argv[3], "%f", &k);
int Nx = Nx_max;
int *h_Ny;
int *d_Ny;
int *h_test_array;
int *d_test_array;
int *ref_array;
h_test_array = new int[Nx_max];
ref_array = new int[Nx_max];
CudaSafeCall(hipMalloc(&d_test_array, Nx_max*sizeof(int)));
hipLaunchKernelGGL(( InitTestArray), dim3(1), dim3(1), 0, 0, d_test_array);
h_Ny = new int[Nx_max];
CudaSafeCall(hipMalloc(&d_Ny, Nx_max*sizeof(int)));
NestedLoop::Init();
printf("Testing Frame1DNestedLoop...\n");
SetNy(Nx, Ny_max, h_Ny, k);
hipMemcpy(d_Ny, h_Ny, Nx*sizeof(int), hipMemcpyHostToDevice);
for(int ix=0; ix<Nx_max; ix++) {
ref_array[ix] = h_Ny[ix]*(h_Ny[ix] - 1);
}
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Frame1DNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Frame1DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Frame2DNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Frame2DNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Frame2DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Smart2DNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Smart2DNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Smart2DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Smart1DNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Smart1DNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Smart1DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing SimpleNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::SimpleNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("SimpleNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing ParallelInnerNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::ParallelInnerNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("ParallelInnerNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing ParallelOuterNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::ParallelOuterNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("ParallelOuterNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
#ifdef WITH_CUMUL_SUM
printf("Testing CumulSumNestedLoop...\n");
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::CumulSumNestedLoop(Nx, d_Ny);
hipMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
hipMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("CumulSumNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
#endif
printf("Evaluating execution time...\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
long n_iter = (1000000000l/Nx/Ny_max);
if (n_iter<1) n_iter=1;
if (n_iter>1000) n_iter=1000;
float frame1D_nested_loop_time = 0;
float frame2D_nested_loop_time = 0;
float smart1D_nested_loop_time = 0;
float smart2D_nested_loop_time = 0;
float simple_nested_loop_time = 0;
float parall_in_nested_loop_time = 0;
float parall_out_nested_loop_time = 0;
#ifdef WITH_CUMUL_SUM
float cumul_sum_nested_loop_time = 0;
#endif
for (long i_iter=0; i_iter<n_iter; i_iter++) {
SetNy(Nx, Ny_max, h_Ny, k);
hipMemcpy(d_Ny, h_Ny, Nx*sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::Frame1DNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
frame1D_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::Frame2DNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
frame2D_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::Smart1DNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
smart1D_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::Smart2DNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
smart2D_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::SimpleNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
simple_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::ParallelInnerNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
parall_in_nested_loop_time += milliseconds;
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::ParallelOuterNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
parall_out_nested_loop_time += milliseconds;
#ifdef WITH_CUMUL_SUM
hipMemset(d_test_array, 0, Nx_max*sizeof(int));
hipEventRecord(start);
NestedLoop::CumulSumNestedLoop(Nx, d_Ny);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cumul_sum_nested_loop_time += milliseconds;
#endif
}
frame1D_nested_loop_time = frame1D_nested_loop_time / n_iter;
frame2D_nested_loop_time = frame2D_nested_loop_time / n_iter;
smart1D_nested_loop_time = smart1D_nested_loop_time / n_iter;
smart2D_nested_loop_time = smart2D_nested_loop_time / n_iter;
simple_nested_loop_time = simple_nested_loop_time / n_iter;
parall_in_nested_loop_time = parall_in_nested_loop_time / n_iter;
parall_out_nested_loop_time = parall_out_nested_loop_time / n_iter;
printf ("Frame1DNestedLoop average time: %f ms\n", frame1D_nested_loop_time);
printf ("Frame2DNestedLoop average time: %f ms\n", frame2D_nested_loop_time);
printf ("Smart1DNestedLoop average time: %f ms\n", smart1D_nested_loop_time);
printf ("Smart2DNestedLoop average time: %f ms\n", smart2D_nested_loop_time);
printf ("SimpleNestedLoop average time: %f ms\n", simple_nested_loop_time);
printf ("ParallelInnerNestedLoop average time: %f ms\n",
parall_in_nested_loop_time);
printf ("ParallelOuterNestedLoop average time: %f ms\n",
parall_out_nested_loop_time);
#ifdef WITH_CUMUL_SUM
cumul_sum_nested_loop_time = cumul_sum_nested_loop_time / n_iter;
printf ("CumulSumNestedLoop average time: %f ms\n",
cumul_sum_nested_loop_time);
#endif
return 0;
}
float rnd()
{
float r = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
return r;
}
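// Rejection sampling: accepts x in [0,1) with probability proportional to
// eps + (1-eps)*C*exp(-k*x), biasing draws toward small x; for k below 1e-6
// it falls back to a plain uniform draw.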
float rnd_distribution(float k)
{
if (k<1.e-6) return rnd();
float eps = 0.01;
float C = k/(1.-exp(-k));
float x, y, f;
do {
x = rnd();
y = rnd();
f = eps + (1.-eps)*C*exp(-k*x);
} while (y>f);
return x;
}
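// Fills Ny[0..Nx-1] with inner-loop sizes in [1, Ny_max] drawn from the skewed
// distribution above, so outer iterations receive very uneven amounts of work.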
int SetNy(int Nx, int Ny_max, int *Ny, int k)
{
for (int ix=0; ix<Nx; ix++) {
int ny = (int)floor(rnd_distribution(k)*Ny_max);
if (ny == 0) ny = 1;
Ny[ix] = ny;
}
return 0;
}
| 4cfa37dca268a48417dd8aca30cc6da729b859e6.cu | /*
Copyright (C) 2016 Bruno Golosio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_error.h"
#include "nested_loop.h"
__device__ int *TestArray;
__global__ void InitTestArray(int *test_array)
{
TestArray = test_array;
}
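// Per-(ix, iy) body executed by every nested-loop strategy under test: it
// atomically accumulates iy*val into TestArray[ix], so the final array content
// is independent of iteration order and can be checked against a host reference.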
__device__ void NestedLoopFunction(int ix, int iy, int val)
{
atomicAdd(&TestArray[ix], iy*val);
}
int SetNy(int Nx, int Ny_max, int *Ny, int k);
int main(int argc, char*argv[])
{
int Nx_max;
int Ny_max;
float k;
if (argc!=4) {
printf("Usage: %s Nx Ny_max k\n", argv[0]);
return 0;
}
sscanf(argv[1], "%d", &Nx_max);
sscanf(argv[2], "%d", &Ny_max);
sscanf(argv[3], "%f", &k);
int Nx = Nx_max;
int *h_Ny;
int *d_Ny;
int *h_test_array;
int *d_test_array;
int *ref_array;
h_test_array = new int[Nx_max];
ref_array = new int[Nx_max];
CudaSafeCall(cudaMalloc(&d_test_array, Nx_max*sizeof(int)));
InitTestArray<<<1, 1>>>(d_test_array);
h_Ny = new int[Nx_max];
CudaSafeCall(cudaMalloc(&d_Ny, Nx_max*sizeof(int)));
NestedLoop::Init();
printf("Testing Frame1DNestedLoop...\n");
SetNy(Nx, Ny_max, h_Ny, k);
cudaMemcpy(d_Ny, h_Ny, Nx*sizeof(int), cudaMemcpyHostToDevice);
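// Host-side reference: every nested-loop implementation tested below must leave
// exactly these values in the device test array.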
for(int ix=0; ix<Nx_max; ix++) {
ref_array[ix] = h_Ny[ix]*(h_Ny[ix] - 1);
}
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Frame1DNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Frame1DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Frame2DNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Frame2DNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Frame2DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Smart2DNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Smart2DNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Smart2DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing Smart1DNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::Smart1DNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("Smart1DNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing SimpleNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::SimpleNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("SimpleNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing ParallelInnerNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::ParallelInnerNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("ParallelInnerNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
printf("Testing ParallelOuterNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::ParallelOuterNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("ParallelOuterNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
#ifdef WITH_CUMUL_SUM
printf("Testing CumulSumNestedLoop...\n");
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
NestedLoop::CumulSumNestedLoop(Nx, d_Ny);
cudaMemcpy(h_test_array, d_test_array, Nx_max*sizeof(int),
cudaMemcpyDeviceToHost);
for(int ix=0; ix<Nx_max; ix++) {
if (h_test_array[ix] != ref_array[ix]) {
printf("CumulSumNestedLoop error at ix = %d\n", ix);
exit(-1);
}
}
printf("OK\n\n");
#endif
printf("Evaluating execution time...\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
long n_iter = (1000000000l/Nx/Ny_max);
if (n_iter<1) n_iter=1;
if (n_iter>1000) n_iter=1000;
float frame1D_nested_loop_time = 0;
float frame2D_nested_loop_time = 0;
float smart1D_nested_loop_time = 0;
float smart2D_nested_loop_time = 0;
float simple_nested_loop_time = 0;
float parall_in_nested_loop_time = 0;
float parall_out_nested_loop_time = 0;
#ifdef WITH_CUMUL_SUM
float cumul_sum_nested_loop_time = 0;
#endif
for (long i_iter=0; i_iter<n_iter; i_iter++) {
SetNy(Nx, Ny_max, h_Ny, k);
cudaMemcpy(d_Ny, h_Ny, Nx*sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::Frame1DNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
frame1D_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::Frame2DNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
frame2D_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::Smart1DNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
smart1D_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::Smart2DNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
smart2D_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::SimpleNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
simple_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::ParallelInnerNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
parall_in_nested_loop_time += milliseconds;
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::ParallelOuterNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
parall_out_nested_loop_time += milliseconds;
#ifdef WITH_CUMUL_SUM
cudaMemset(d_test_array, 0, Nx_max*sizeof(int));
cudaEventRecord(start);
NestedLoop::CumulSumNestedLoop(Nx, d_Ny);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cumul_sum_nested_loop_time += milliseconds;
#endif
}
frame1D_nested_loop_time = frame1D_nested_loop_time / n_iter;
frame2D_nested_loop_time = frame2D_nested_loop_time / n_iter;
smart1D_nested_loop_time = smart1D_nested_loop_time / n_iter;
smart2D_nested_loop_time = smart2D_nested_loop_time / n_iter;
simple_nested_loop_time = simple_nested_loop_time / n_iter;
parall_in_nested_loop_time = parall_in_nested_loop_time / n_iter;
parall_out_nested_loop_time = parall_out_nested_loop_time / n_iter;
printf ("Frame1DNestedLoop average time: %f ms\n", frame1D_nested_loop_time);
printf ("Frame2DNestedLoop average time: %f ms\n", frame2D_nested_loop_time);
printf ("Smart1DNestedLoop average time: %f ms\n", smart1D_nested_loop_time);
printf ("Smart2DNestedLoop average time: %f ms\n", smart2D_nested_loop_time);
printf ("SimpleNestedLoop average time: %f ms\n", simple_nested_loop_time);
printf ("ParallelInnerNestedLoop average time: %f ms\n",
parall_in_nested_loop_time);
printf ("ParallelOuterNestedLoop average time: %f ms\n",
parall_out_nested_loop_time);
#ifdef WITH_CUMUL_SUM
cumul_sum_nested_loop_time = cumul_sum_nested_loop_time / n_iter;
printf ("CumulSumNestedLoop average time: %f ms\n",
cumul_sum_nested_loop_time);
#endif
return 0;
}
float rnd()
{
float r = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
return r;
}
float rnd_distribution(float k)
{
if (k<1.e-6) return rnd();
float eps = 0.01;
float C = k/(1.-exp(-k));
float x, y, f;
do {
x = rnd();
y = rnd();
f = eps + (1.-eps)*C*exp(-k*x);
} while (y>f);
return x;
}
int SetNy(int Nx, int Ny_max, int *Ny, int k)
{
for (int ix=0; ix<Nx; ix++) {
int ny = (int)floor(rnd_distribution(k)*Ny_max);
if (ny == 0) ny = 1;
Ny[ix] = ny;
}
return 0;
}
|
7582c5bb174b08cd3dd82a0983d6c0c66251af4a.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 2//32
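// Each thread writes A[tid], reads it back into a volatile local, writes B[tid],
// then reads A[tid] again; the kernel asserts that both reads observe the same
// value, which is the property the verification flags at the top of the file exercise.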
__global__ void foo(int * A, int * B) {
A[threadIdx.x] = 1;
volatile int x = A[threadIdx.x];
B[threadIdx.x] = 1;
volatile int y = A[threadIdx.x];
assert(x==y);
}
int main(){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 0;
for (int i = 0; i < N; i++)
b[i] = 2;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1),dim3(N), 0, 0, dev_a, dev_b);
//ESBMC_verify_kernel(foo, 1, N, dev_a, dev_b);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
hipMemcpy(b,dev_b,size,hipMemcpyDeviceToHost);
free(a); free(b);
hipFree(dev_a); hipFree(dev_b);
return 0;
}
| 7582c5bb174b08cd3dd82a0983d6c0c66251af4a.cu | //pass
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//32
__global__ void foo(int * A, int * B) {
A[threadIdx.x] = 1;
volatile int x = A[threadIdx.x];
B[threadIdx.x] = 1;
volatile int y = A[threadIdx.x];
assert(x==y);
}
int main(){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 0;
for (int i = 0; i < N; i++)
b[i] = 2;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);
foo<<<1,N>>>(dev_a, dev_b);
//ESBMC_verify_kernel(foo, 1, N, dev_a, dev_b);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
cudaMemcpy(b,dev_b,size,cudaMemcpyDeviceToHost);
free(a); free(b);
cudaFree(dev_a); cudaFree(dev_b);
return 0;
}
|
e1c11aa3a5fd31f9cd7f8f70130a690e098c14f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <device_launch_parameters.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float color = 0.0f;
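// Weighted sum over the filterWidth x filterWidth window centred on this pixel;
// neighbour coordinates are clamped to the image border (replicate padding) so
// out-of-range taps reuse the nearest valid pixel.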
for (int f_y = 0; f_y < filterWidth; ++f_y) {
for (int f_x = 0; f_x < filterWidth; ++f_x) {
int c_x = min(max(thread_2D_pos.x + f_x - filterWidth / 2, 0), static_cast<int>(numCols - 1));
int c_y = min(max(thread_2D_pos.y + f_y - filterWidth / 2, 0), static_cast<int>(numRows - 1));
color += filter[f_y * filterWidth + f_x] * static_cast<float>(inputChannel[c_y * numCols + c_x]);
}
}
outputChannel[thread_1D_pos] = color;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
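// Ceiling division so the grid covers every pixel even when numCols or numRows
// is not a multiple of the 32x32 block.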
const dim3 gridSize((numCols - 1) / blockSize.x + 1, (numRows - 1) / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| e1c11aa3a5fd31f9cd7f8f70130a690e098c14f4.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <device_launch_parameters.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float color = 0.0f;
for (int f_y = 0; f_y < filterWidth; ++f_y) {
for (int f_x = 0; f_x < filterWidth; ++f_x) {
int c_x = min(max(thread_2D_pos.x + f_x - filterWidth / 2, 0), static_cast<int>(numCols - 1));
int c_y = min(max(thread_2D_pos.y + f_y - filterWidth / 2, 0), static_cast<int>(numRows - 1));
color += filter[f_y * filterWidth + f_x] * static_cast<float>(inputChannel[c_y * numCols + c_x]);
}
}
outputChannel[thread_1D_pos] = color;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
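// Scatter the interleaved RGBA pixel into three planar (SoA) channel buffers;
// the alpha component is ignored here (recombineChannels later writes 255).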
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize((numCols - 1) / blockSize.x + 1, (numRows - 1) / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
72e1197792f3bf5460bfbd094bbba792d0f4c72e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zswapblk.cu normal z -> d, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
double *A1;
double *A2;
int n, lda1, lda2, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_dswapblk_params_t;
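// One kernel launch applies up to BLOCK_SIZE row interchanges: ipiv[i] holds the
// partner row inside A2 (already adjusted by `offset` on the host), and -1 marks
// a pivot that would swap a row with itself, which the kernels skip.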
__global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
double *A1 = params.A1 + y - params.lda1;
double *A2 = params.A2 + y;
for( int i = 0; i < params.npivots; i++ )
{
A1 += params.lda1;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A1;
double *tmp2 = A2 + params.ipiv[i]*params.lda2;
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
}
__global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = y*params.lda1;
unsigned int offset2 = y*params.lda2;
if( y < params.n )
{
double *A1 = params.A1 + offset1 - 1;
double *A2 = params.A2 + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A1++;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A1;
double *tmp2 = A2 + params.ipiv[i];
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
/**
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk_q(
magma_order_t order, magma_int_t n,
double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_dswapblkcm), dim3(blocks), dim3(blocksize), 0, queue , params );
}
}
else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_dswapblkrm), dim3(blocks), dim3(blocksize), 0, queue , params );
}
}
}
/**
@see magmablas_dswapblk_q
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk(
magma_order_t order, magma_int_t n,
double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
magmablas_dswapblk_q(
order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream );
}
| 72e1197792f3bf5460bfbd094bbba792d0f4c72e.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zswapblk.cu normal z -> d, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
double *A1;
double *A2;
int n, lda1, lda2, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_dswapblk_params_t;
__global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
double *A1 = params.A1 + y - params.lda1;
double *A2 = params.A2 + y;
for( int i = 0; i < params.npivots; i++ )
{
A1 += params.lda1;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A1;
double *tmp2 = A2 + params.ipiv[i]*params.lda2;
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
}
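// Column-major variant: thread y owns column y of both panels and, for each
// pivot i, swaps element (i, y) of A1 with element (ipiv[i], y) of A2.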
__global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = y*params.lda1;
unsigned int offset2 = y*params.lda2;
if( y < params.n )
{
double *A1 = params.A1 + offset1 - 1;
double *A2 = params.A2 + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A1++;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A1;
double *tmp2 = A2 + params.ipiv[i];
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
/**
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk_q(
magma_order_t order, magma_int_t n,
double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_dswapblkcm<<< blocks, blocksize, 0, queue >>>( params );
}
}
else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_dswapblkrm<<< blocks, blocksize, 0, queue >>>( params );
}
}
}
/**
@see magmablas_dswapblk_q
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk(
magma_order_t order, magma_int_t n,
double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
magmablas_dswapblk_q(
order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream );
}
|
8227d0dc6a9255fb4453b9f0823f8aa55abdef19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
#if __CUDA_ARCH__ >= 350
#define LDG(x, i) __ldg(x + i)
#define LDG2(x, i) convert::To<AccT>(__ldg(x + i))
#else
#define LDG(x, i) x[i]
#define LDG2(x, i) convert::To<AccT>(x[i])
#endif
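// Folds the statistics and the affine parameters into per-(sample, channel)
// coefficients: scale = gamma * rsig and bias = beta - scale * mu, so the
// forward pass reduces to a single fma per element.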
template <typename T>
__global__ void _GroupNormFusedParams(
const int N,
const int G,
const int D,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias) {
const int outer_dim = N * G;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const int g = i % G;
const T mu_val = LDG(mu, i);
const T rsig_val = LDG(rsig, i);
CUDA_2D_KERNEL_LOOP2(j, D) {
const int wi = i * D + j;
const int gi = g * D + j;
const T w = LDG(gamma, gi) * rsig_val;
scale[wi] = w;
bias[wi] = fma(-w, mu_val, LDG(beta, gi));
}
}
}
template <typename T, typename AccT>
__global__ void _GroupNormAffineNCHW(
const int N,
const int C,
const int S,
const T* x,
const AccT* scale,
const AccT* bias,
T* y) {
const int outer_dim = N * C;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const AccT w = LDG(scale, i);
const AccT b = LDG(bias, i);
CUDA_2D_KERNEL_LOOP2(j, S) {
const int xi = i * S + j;
y[xi] = convert::To<AccT>(fma(LDG2(x, xi), w, b));
}
}
}
template <typename T, typename AccT>
__global__ void _GroupNormAffineNHWC(
const int N,
const int C,
const int S,
const T* x,
const AccT* scale,
const AccT* bias,
T* y) {
const int outer_dim = N * S;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const int n = i / S;
CUDA_2D_KERNEL_LOOP2(j, C) {
const int xi = i * C + j;
const int wi = n * C + j;
y[xi] = convert::To<T>(fma(LDG2(x, xi), LDG(scale, wi), LDG(bias, wi)));
}
}
}
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormWGrad(
const int N,
const int G,
const int D,
const int S,
const T* x,
const AccT* mu,
const AccT* rsig,
const T* dy,
AccT* dgamma,
AccT* dbeta) {
const int outer_dim = G * D;
const int inner_dim = N * S;
__shared__ typename BlockReduce<AccT>::TempStorage dg_storage;
__shared__ typename BlockReduce<AccT>::TempStorage db_storage;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
AccT dg_val = AccT(0), db_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
const int n = j / S;
const int xi = kOrder == StorageOrder::NCHW
? (n * outer_dim + i) * S + j % S
: j * outer_dim + i;
const int mi = n * G + i / D;
dg_val += LDG2(dy, xi) * (LDG2(x, xi) - LDG(mu, mi)) * LDG(rsig, mi);
db_val += LDG2(dy, xi);
}
dg_val = BlockReduce<AccT>(dg_storage).Sum(dg_val);
db_val = BlockReduce<AccT>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
}
}
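// ds and db are per-(sample, group) reductions of gamma*dy*x and gamma*dy;
// they feed the input-gradient expression computed in _GroupNormGrad below.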
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormInternalGrad(
const int N,
const int G,
const int D,
const int S,
const T* x,
const AccT* gamma,
const T* dy,
AccT* ds,
AccT* db) {
const int outer_dim = N * G;
const int inner_dim = D * S;
__shared__ typename BlockReduce<AccT>::TempStorage ds_storage;
__shared__ typename BlockReduce<AccT>::TempStorage db_storage;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
AccT ds_val = AccT(0), db_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
const int gi = i % G * D + j / S;
const int xi = kOrder == StorageOrder::NCHW
? i * inner_dim + j
: (i / G * S + j % S) * G * D + gi;
ds_val += LDG(gamma, gi) * LDG2(dy, xi) * LDG2(x, xi);
db_val += LDG(gamma, gi) * LDG2(dy, xi);
}
ds_val = BlockReduce<AccT>(ds_storage).Sum(ds_val);
db_val = BlockReduce<AccT>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
}
}
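// Combines the reductions into the input gradient:
// dx = gamma*dy*rsig + ((db*mu - ds)*(x - mu)*rsig^3 - db*rsig) / (D*S).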
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormGrad(
const int nthreads,
const int G,
const int D,
const int S,
const T* x,
const AccT* mu,
const AccT* rsig,
const AccT* gamma,
const AccT* ds,
const AccT* db,
const T* dy,
T* dx) {
const int C = G * D;
const AccT denom = AccT(1) / AccT(D * S);
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int mi = kOrder == StorageOrder::NCHW ? i / (D * S)
: i / (C * S) * G + (i / D % G);
const int gi = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C;
const AccT u = fma(LDG(db, mi), LDG(mu, mi), -LDG(ds, mi)) *
(LDG2(x, i) - LDG(mu, mi)) * math::utils::Cube(LDG(rsig, mi));
const AccT v = LDG(db, mi) * LDG(rsig, mi);
dx[i] = convert::To<T>(
LDG(gamma, gi) * LDG2(dy, i) * LDG(rsig, mi) + (u - v) * denom);
}
}
#undef LDG
#undef LDG2
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DISPATCH_GROUPNORM_KERNEL(name, T, AccT, nblocks, nthreads, ...) \
if (data_format == "NCHW") { \
hipLaunchKernelGGL(( name<T, AccT, StorageOrder::NCHW>) \
, dim3(nblocks), dim3(nthreads), 0, ctx->cuda_stream(), __VA_ARGS__); \
} else if (data_format == "NHWC") { \
hipLaunchKernelGGL(( name<T, AccT, StorageOrder::NHWC>) \
, dim3(nblocks), dim3(nthreads), 0, ctx->cuda_stream(), __VA_ARGS__); \
} else { \
LOG(FATAL) << "Unknown DataFormat: " << data_format; \
}
#define DEFINE_KERNEL_LAUNCHER(T, AccT) \
template <> \
void GroupNorm<T, AccT, CUDAContext>( \
const int N, \
const int G, \
const int D, \
const int S, \
const string& data_format, \
const T* x, \
const AccT* mu, \
const AccT* rsig, \
const AccT* gamma, \
const AccT* beta, \
AccT* scale, \
AccT* bias, \
T* y, \
CUDAContext* ctx) { \
const int C = G * D; \
hipLaunchKernelGGL(( _GroupNormFusedParams), \
CUDA_2D_BLOCKS(N* G), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), N, G, D, mu, rsig, gamma, beta, scale, bias); \
if (data_format == "NCHW") { \
hipLaunchKernelGGL(( _GroupNormAffineNCHW), \
CUDA_2D_BLOCKS(N* C), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
N, \
C, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
scale, \
bias, \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
} else if (data_format == "NHWC") { \
hipLaunchKernelGGL(( _GroupNormAffineNHWC), \
CUDA_2D_BLOCKS(N* C), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
N, \
C, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
scale, \
bias, \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
} \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T, AccT) \
template <> \
void GroupNormGrad<T, AccT, CUDAContext>( \
const int N, \
const int G, \
const int D, \
const int S, \
const string& data_format, \
const T* x, \
const AccT* mu, \
const AccT* rsig, \
const AccT* gamma, \
const T* dy, \
AccT* ds, \
AccT* db, \
AccT* dgamma, \
AccT* dbeta, \
T* dx, \
CUDAContext* ctx) { \
auto nthreads = N * G * D * S; \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormWGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_2D_BLOCKS(G* D), \
CUDA_THREADS, \
N, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mu, \
rsig, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
dgamma, \
dbeta); \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormInternalGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_2D_BLOCKS(N* G), \
CUDA_THREADS, \
N, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
gamma, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
ds, \
db); \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
nthreads, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mu, \
rsig, \
gamma, \
ds, \
db, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
reinterpret_cast<math::ScalarType<T>::type*>(dx)); \
}
DEFINE_KERNEL_LAUNCHER(float16, float);
DEFINE_KERNEL_LAUNCHER(float, float);
DEFINE_KERNEL_LAUNCHER(double, double);
DEFINE_GRAD_KERNEL_LAUNCHER(float16, float);
DEFINE_GRAD_KERNEL_LAUNCHER(float, float);
DEFINE_GRAD_KERNEL_LAUNCHER(double, double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
#undef DISPATCH_GROUPNORM_KERNEL
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| 8227d0dc6a9255fb4453b9f0823f8aa55abdef19.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
#if __CUDA_ARCH__ >= 350
#define LDG(x, i) __ldg(x + i)
#define LDG2(x, i) convert::To<AccT>(__ldg(x + i))
#else
#define LDG(x, i) x[i]
#define LDG2(x, i) convert::To<AccT>(x[i])
#endif
template <typename T>
__global__ void _GroupNormFusedParams(
const int N,
const int G,
const int D,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias) {
const int outer_dim = N * G;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const int g = i % G;
const T mu_val = LDG(mu, i);
const T rsig_val = LDG(rsig, i);
CUDA_2D_KERNEL_LOOP2(j, D) {
const int wi = i * D + j;
const int gi = g * D + j;
const T w = LDG(gamma, gi) * rsig_val;
scale[wi] = w;
bias[wi] = fma(-w, mu_val, LDG(beta, gi));
}
}
}
template <typename T, typename AccT>
__global__ void _GroupNormAffineNCHW(
const int N,
const int C,
const int S,
const T* x,
const AccT* scale,
const AccT* bias,
T* y) {
const int outer_dim = N * C;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const AccT w = LDG(scale, i);
const AccT b = LDG(bias, i);
CUDA_2D_KERNEL_LOOP2(j, S) {
const int xi = i * S + j;
y[xi] = convert::To<AccT>(fma(LDG2(x, xi), w, b));
}
}
}
template <typename T, typename AccT>
__global__ void _GroupNormAffineNHWC(
const int N,
const int C,
const int S,
const T* x,
const AccT* scale,
const AccT* bias,
T* y) {
const int outer_dim = N * S;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
const int n = i / S;
CUDA_2D_KERNEL_LOOP2(j, C) {
const int xi = i * C + j;
const int wi = n * C + j;
y[xi] = convert::To<T>(fma(LDG2(x, xi), LDG(scale, wi), LDG(bias, wi)));
}
}
}
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormWGrad(
const int N,
const int G,
const int D,
const int S,
const T* x,
const AccT* mu,
const AccT* rsig,
const T* dy,
AccT* dgamma,
AccT* dbeta) {
const int outer_dim = G * D;
const int inner_dim = N * S;
__shared__ typename BlockReduce<AccT>::TempStorage dg_storage;
__shared__ typename BlockReduce<AccT>::TempStorage db_storage;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
AccT dg_val = AccT(0), db_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
const int n = j / S;
const int xi = kOrder == StorageOrder::NCHW
? (n * outer_dim + i) * S + j % S
: j * outer_dim + i;
const int mi = n * G + i / D;
dg_val += LDG2(dy, xi) * (LDG2(x, xi) - LDG(mu, mi)) * LDG(rsig, mi);
db_val += LDG2(dy, xi);
}
dg_val = BlockReduce<AccT>(dg_storage).Sum(dg_val);
db_val = BlockReduce<AccT>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
}
}
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormInternalGrad(
const int N,
const int G,
const int D,
const int S,
const T* x,
const AccT* gamma,
const T* dy,
AccT* ds,
AccT* db) {
const int outer_dim = N * G;
const int inner_dim = D * S;
__shared__ typename BlockReduce<AccT>::TempStorage ds_storage;
__shared__ typename BlockReduce<AccT>::TempStorage db_storage;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
AccT ds_val = AccT(0), db_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
const int gi = i % G * D + j / S;
const int xi = kOrder == StorageOrder::NCHW
? i * inner_dim + j
: (i / G * S + j % S) * G * D + gi;
ds_val += LDG(gamma, gi) * LDG2(dy, xi) * LDG2(x, xi);
db_val += LDG(gamma, gi) * LDG2(dy, xi);
}
ds_val = BlockReduce<AccT>(ds_storage).Sum(ds_val);
db_val = BlockReduce<AccT>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
}
}
template <typename T, typename AccT, StorageOrder kOrder>
__global__ void _GroupNormGrad(
const int nthreads,
const int G,
const int D,
const int S,
const T* x,
const AccT* mu,
const AccT* rsig,
const AccT* gamma,
const AccT* ds,
const AccT* db,
const T* dy,
T* dx) {
const int C = G * D;
const AccT denom = AccT(1) / AccT(D * S);
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int mi = kOrder == StorageOrder::NCHW ? i / (D * S)
: i / (C * S) * G + (i / D % G);
const int gi = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C;
const AccT u = fma(LDG(db, mi), LDG(mu, mi), -LDG(ds, mi)) *
(LDG2(x, i) - LDG(mu, mi)) * math::utils::Cube(LDG(rsig, mi));
const AccT v = LDG(db, mi) * LDG(rsig, mi);
dx[i] = convert::To<T>(
LDG(gamma, gi) * LDG2(dy, i) * LDG(rsig, mi) + (u - v) * denom);
}
}
#undef LDG
#undef LDG2
} // namespace
/* ------------------- Launcher Separator ------------------- */
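// The gradient launcher funnels its three kernels through this macro, which
// picks the NCHW or NHWC specialization from the data_format string at runtime.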
#define DISPATCH_GROUPNORM_KERNEL(name, T, AccT, nblocks, nthreads, ...) \
if (data_format == "NCHW") { \
name<T, AccT, StorageOrder::NCHW> \
<<<nblocks, nthreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \
} else if (data_format == "NHWC") { \
name<T, AccT, StorageOrder::NHWC> \
<<<nblocks, nthreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \
} else { \
LOG(FATAL) << "Unknown DataFormat: " << data_format; \
}
#define DEFINE_KERNEL_LAUNCHER(T, AccT) \
template <> \
void GroupNorm<T, AccT, CUDAContext>( \
const int N, \
const int G, \
const int D, \
const int S, \
const string& data_format, \
const T* x, \
const AccT* mu, \
const AccT* rsig, \
const AccT* gamma, \
const AccT* beta, \
AccT* scale, \
AccT* bias, \
T* y, \
CUDAContext* ctx) { \
const int C = G * D; \
_GroupNormFusedParams<<< \
CUDA_2D_BLOCKS(N* G), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>(N, G, D, mu, rsig, gamma, beta, scale, bias); \
if (data_format == "NCHW") { \
_GroupNormAffineNCHW<<< \
CUDA_2D_BLOCKS(N* C), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
N, \
C, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
scale, \
bias, \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
} else if (data_format == "NHWC") { \
_GroupNormAffineNHWC<<< \
CUDA_2D_BLOCKS(N* C), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
N, \
C, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
scale, \
bias, \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
} \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T, AccT) \
template <> \
void GroupNormGrad<T, AccT, CUDAContext>( \
const int N, \
const int G, \
const int D, \
const int S, \
const string& data_format, \
const T* x, \
const AccT* mu, \
const AccT* rsig, \
const AccT* gamma, \
const T* dy, \
AccT* ds, \
AccT* db, \
AccT* dgamma, \
AccT* dbeta, \
T* dx, \
CUDAContext* ctx) { \
auto nthreads = N * G * D * S; \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormWGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_2D_BLOCKS(G* D), \
CUDA_THREADS, \
N, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mu, \
rsig, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
dgamma, \
dbeta); \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormInternalGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_2D_BLOCKS(N* G), \
CUDA_THREADS, \
N, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
gamma, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
ds, \
db); \
DISPATCH_GROUPNORM_KERNEL( \
_GroupNormGrad, \
math::ScalarType<T>::type, \
AccT, \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
nthreads, \
G, \
D, \
S, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mu, \
rsig, \
gamma, \
ds, \
db, \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
reinterpret_cast<math::ScalarType<T>::type*>(dx)); \
}
DEFINE_KERNEL_LAUNCHER(float16, float);
DEFINE_KERNEL_LAUNCHER(float, float);
DEFINE_KERNEL_LAUNCHER(double, double);
DEFINE_GRAD_KERNEL_LAUNCHER(float16, float);
DEFINE_GRAD_KERNEL_LAUNCHER(float, float);
DEFINE_GRAD_KERNEL_LAUNCHER(double, double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
#undef DISPATCH_GROUPNORM_KERNEL
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
3d266200ba784e01b03c3f03c338042c6b573612.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "cpu_anim.h"
#include <vector>
static constexpr int DIM = 1024;
static constexpr float MAX_TEMP = 1.0f;
static constexpr float MIN_TEMP = 0.0001f;
static constexpr float SPEED = 0.25f;
texture<float, 2> tex_const;
texture<float, 2> tex_in;
texture<float, 2> tex_out;
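// Three 2D float textures: a constant heater pattern plus two buffers that
// are ping-ponged between input and output on successive simulation steps.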
__global__ void copy_const_kernel(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex2D(tex_const, x, y);
if (c != 0.0f) {
iptr[offset] = c;
}
}
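// One simulation step: each cell moves toward the average of its four
// neighbours; dst_out selects which texture currently holds the input grid.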
__global__ void blend_kernel(float *out, bool dst_out) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float t, l, c, r, b;
if (dst_out) {
t = tex2D(tex_in, x, y - 1);
l = tex2D(tex_in, x - 1, y);
c = tex2D(tex_in, x, y);
r = tex2D(tex_in, x + 1, y);
b = tex2D(tex_in, x, y + 1);
} else {
t = tex2D(tex_out, x, y - 1);
l = tex2D(tex_out, x - 1, y);
c = tex2D(tex_out, x, y);
r = tex2D(tex_out, x + 1, y);
b = tex2D(tex_out, x, y + 1);
}
out[offset] = c + SPEED * (t + b + r + l - 4.0f * c);
}
struct DataBlock {
unsigned char *output_bitmap;
float *dev_in;
float *dev_out;
float *dev_const;
CPUAnimBitmap *bitmap;
hipEvent_t start, stop;
float total_time = .0f;
float frames = .0f;
void init(CPUAnimBitmap *bm) {
bitmap = bm;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
const long image_size = bitmap->image_size();
CHECK(hipMalloc(&output_bitmap, image_size));
CHECK(hipMalloc(&dev_in, image_size));
CHECK(hipMalloc(&dev_out, image_size));
CHECK(hipMalloc(&dev_const, image_size));
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
CHECK(hipBindTexture2D(nullptr, tex_const, dev_const, desc, DIM, DIM,
sizeof(float) * DIM));
CHECK(hipBindTexture2D(nullptr, tex_in, dev_in, desc, DIM, DIM,
sizeof(float) * DIM));
CHECK(hipBindTexture2D(nullptr, tex_out, dev_out, desc, DIM, DIM,
sizeof(float) * DIM));
std::vector<float> temp(DIM * DIM);
for (int i = 0; i < DIM * DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310) && (y < 601)) {
temp[i] = MAX_TEMP;
}
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2.0f;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
CHECK(hipMemcpy(dev_const, temp.data(), bitmap->image_size(),
hipMemcpyHostToDevice));
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x + y * DIM] = MAX_TEMP;
}
}
CHECK(hipMemcpy(dev_in, temp.data(), bitmap->image_size(),
hipMemcpyHostToDevice));
}
~DataBlock() {
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
CHECK(hipFree(output_bitmap));
CHECK(hipFree(dev_in));
CHECK(hipFree(dev_out));
CHECK(hipFree(dev_const));
}
};
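// Per-frame callback: re-stamps the constant heater cells, runs 90 blend steps
// while swapping the in/out buffers, converts the field to colour, and reports
// the running average frame time.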
void anim_gpu(void *p, int) {
auto d = static_cast<DataBlock *>(p);
CHECK(hipEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dst_out = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dst_out) {
in = d->dev_in;
out = d->dev_out;
} else {
in = d->dev_out;
out = d->dev_in;
}
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( blend_kernel), dim3(blocks), dim3(threads), 0, 0, out, dst_out);
dst_out = !dst_out;
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_in);
CHECK(hipMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(),
hipMemcpyDeviceToHost));
CHECK(hipEventRecord(d->stop, 0));
CHECK(hipEventSynchronize(d->stop));
float elapsed_time;
CHECK(hipEventElapsedTime(&elapsed_time, d->start, d->stop));
d->total_time += elapsed_time;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->total_time / d->frames);
}
void anim_exit(void *) {
CHECK(hipUnbindTexture(tex_in));
CHECK(hipUnbindTexture(tex_out));
CHECK(hipUnbindTexture(tex_const));
}
int main() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.init(&bitmap);
bitmap.anim_and_exit(anim_gpu, anim_exit);
return 0;
}
| 3d266200ba784e01b03c3f03c338042c6b573612.cu | #include "common.h"
#include "cpu_anim.h"
#include <vector>
static constexpr int DIM = 1024;
static constexpr float MAX_TEMP = 1.0f;
static constexpr float MIN_TEMP = 0.0001f;
static constexpr float SPEED = 0.25f;
texture<float, 2> tex_const;
texture<float, 2> tex_in;
texture<float, 2> tex_out;
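// Three 2D float textures: a constant heater pattern plus two buffers that
// are ping-ponged between input and output on successive simulation steps.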
__global__ void copy_const_kernel(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex2D(tex_const, x, y);
if (c != 0.0f) {
iptr[offset] = c;
}
}
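// One simulation step: each cell moves toward the average of its four
// neighbours; dst_out selects which texture currently holds the input grid.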
__global__ void blend_kernel(float *out, bool dst_out) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float t, l, c, r, b;
if (dst_out) {
t = tex2D(tex_in, x, y - 1);
l = tex2D(tex_in, x - 1, y);
c = tex2D(tex_in, x, y);
r = tex2D(tex_in, x + 1, y);
b = tex2D(tex_in, x, y + 1);
} else {
t = tex2D(tex_out, x, y - 1);
l = tex2D(tex_out, x - 1, y);
c = tex2D(tex_out, x, y);
r = tex2D(tex_out, x + 1, y);
b = tex2D(tex_out, x, y + 1);
}
out[offset] = c + SPEED * (t + b + r + l - 4.0f * c);
}
struct DataBlock {
unsigned char *output_bitmap;
float *dev_in;
float *dev_out;
float *dev_const;
CPUAnimBitmap *bitmap;
cudaEvent_t start, stop;
float total_time = .0f;
float frames = .0f;
void init(CPUAnimBitmap *bm) {
bitmap = bm;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
const long image_size = bitmap->image_size();
CHECK(cudaMalloc(&output_bitmap, image_size));
CHECK(cudaMalloc(&dev_in, image_size));
CHECK(cudaMalloc(&dev_out, image_size));
CHECK(cudaMalloc(&dev_const, image_size));
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
CHECK(cudaBindTexture2D(nullptr, tex_const, dev_const, desc, DIM, DIM,
sizeof(float) * DIM));
CHECK(cudaBindTexture2D(nullptr, tex_in, dev_in, desc, DIM, DIM,
sizeof(float) * DIM));
CHECK(cudaBindTexture2D(nullptr, tex_out, dev_out, desc, DIM, DIM,
sizeof(float) * DIM));
std::vector<float> temp(DIM * DIM);
for (int i = 0; i < DIM * DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310) && (y < 601)) {
temp[i] = MAX_TEMP;
}
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2.0f;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
CHECK(cudaMemcpy(dev_const, temp.data(), bitmap->image_size(),
cudaMemcpyHostToDevice));
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x + y * DIM] = MAX_TEMP;
}
}
CHECK(cudaMemcpy(dev_in, temp.data(), bitmap->image_size(),
cudaMemcpyHostToDevice));
}
~DataBlock() {
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
CHECK(cudaFree(output_bitmap));
CHECK(cudaFree(dev_in));
CHECK(cudaFree(dev_out));
CHECK(cudaFree(dev_const));
}
};
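// Per-frame callback: re-stamps the constant heater cells, runs 90 blend steps
// while swapping the in/out buffers, converts the field to colour, and reports
// the running average frame time.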
void anim_gpu(void *p, int) {
auto d = static_cast<DataBlock *>(p);
CHECK(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dst_out = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dst_out) {
in = d->dev_in;
out = d->dev_out;
} else {
in = d->dev_out;
out = d->dev_in;
}
copy_const_kernel<<<blocks, threads>>>(in);
blend_kernel<<<blocks, threads>>>(out, dst_out);
dst_out = !dst_out;
}
float_to_color<<<blocks, threads>>>(d->output_bitmap, d->dev_in);
CHECK(cudaMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(),
cudaMemcpyDeviceToHost));
CHECK(cudaEventRecord(d->stop, 0));
CHECK(cudaEventSynchronize(d->stop));
float elapsed_time;
CHECK(cudaEventElapsedTime(&elapsed_time, d->start, d->stop));
d->total_time += elapsed_time;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->total_time / d->frames);
}
void anim_exit(void *) {
CHECK(cudaUnbindTexture(tex_in));
CHECK(cudaUnbindTexture(tex_out));
CHECK(cudaUnbindTexture(tex_const));
}
int main() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.init(&bitmap);
bitmap.anim_and_exit(anim_gpu, anim_exit);
return 0;
}
|
9942cfcbe5172cc909d76eee285300e9435f7579.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
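// Exercises device-side expressions on complex tensors: fill one tensor with a
// constant, take the element-wise modulus of another, then copy the results
// back and verify them on the host.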
void test_cuda_nullary() {
Tensor<std::complex<float>, 1, 0, int> in1(2);
Tensor<std::complex<float>, 1, 0, int> in2(2);
in1.setRandom();
in2.setRandom();
std::size_t float_bytes = in1.size() * sizeof(float);
std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);
std::complex<float>* d_in1;
std::complex<float>* d_in2;
float* d_out2;
hipMalloc((void**)(&d_in1), complex_bytes);
hipMalloc((void**)(&d_in2), complex_bytes);
hipMalloc((void**)(&d_out2), float_bytes);
hipMemcpy(d_in1, in1.data(), complex_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2.data(), complex_bytes, hipMemcpyHostToDevice);
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, 2);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, 2);
Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
d_out2, 2);
gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
gpu_out2.device(gpu_device) = gpu_in2.abs();
Tensor<std::complex<float>, 1, 0, int> new1(2);
Tensor<float, 1, 0, int> new2(2);
assert(hipMemcpyAsync(new1.data(), d_in1, complex_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipMemcpyAsync(new2.data(), d_out2, float_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
}
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out2);
}
static void test_cuda_sum_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_cuda_mean_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.mean();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.mean();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_cuda_product_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.prod();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.prod();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
EIGEN_DECLARE_TEST(test_cxx11_tensor_complex)
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
CALL_SUBTEST(test_cuda_mean_reductions());
CALL_SUBTEST(test_cuda_product_reductions());
}
| 9942cfcbe5172cc909d76eee285300e9435f7579.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
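// Exercises device-side expressions on complex tensors: fill one tensor with a
// constant, take the element-wise modulus of another, then copy the results
// back and verify them on the host.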
void test_cuda_nullary() {
Tensor<std::complex<float>, 1, 0, int> in1(2);
Tensor<std::complex<float>, 1, 0, int> in2(2);
in1.setRandom();
in2.setRandom();
std::size_t float_bytes = in1.size() * sizeof(float);
std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);
std::complex<float>* d_in1;
std::complex<float>* d_in2;
float* d_out2;
cudaMalloc((void**)(&d_in1), complex_bytes);
cudaMalloc((void**)(&d_in2), complex_bytes);
cudaMalloc((void**)(&d_out2), float_bytes);
cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice);
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, 2);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, 2);
Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
d_out2, 2);
gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
gpu_out2.device(gpu_device) = gpu_in2.abs();
Tensor<std::complex<float>, 1, 0, int> new1(2);
Tensor<float, 1, 0, int> new2(2);
assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
}
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out2);
}
static void test_cuda_sum_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_cuda_mean_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.mean();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.mean();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_cuda_product_reductions() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.prod();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.prod();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
EIGEN_DECLARE_TEST(test_cxx11_tensor_complex)
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
CALL_SUBTEST(test_cuda_mean_reductions());
CALL_SUBTEST(test_cuda_product_reductions());
}
|
0b9a47f45f9a6339648083c2d329c60cfe8ac1b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/NumericLimits.cuh>
#include <THH/THHNumerics.cuh>
#include <ATen/native/ReduceOps.h>
#include<ATen/native/ReduceAllOps.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorCompare.h>
namespace at { namespace native {
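// NaN-propagating comparators: if either operand is NaN the reduction keeps
// it, so max/min over a tensor that contains NaN returns NaN.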
template <typename acc_t>
struct MaxNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (THCNumerics<acc_t>::isnan(a) || a > b) ? a : b;
}
};
template <typename scalar_t, typename acc_t=scalar_t>
void max_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MaxNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::lower_bound());
}
template <typename acc_t>
struct MinNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (THCNumerics<acc_t>::isnan(a) || a < b) ? a : b;
}
};
template <typename scalar_t, typename acc_t=scalar_t>
void min_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::upper_bound());
}
void max_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cuda", [&]() {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
template <typename scalar_t, typename acc_t=scalar_t>
void argmax_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, int64_t>(
iter,
ArgMaxOps<acc_t>{},
thrust::pair<acc_t, int64_t>(at::numeric_limits<acc_t>::lower_bound(), 0));
};
template <typename scalar_t, typename acc_t=scalar_t>
void argmin_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, int64_t>(
iter,
ArgMinOps<acc_t>{},
thrust::pair<acc_t, int64_t>(at::numeric_limits<acc_t>::upper_bound(), 0));
};
void argmax_kernel_cuda(TensorIterator& iter) {
// For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,
// we can convert float16 & bfloat16 to float and do all the operations in float.
if (iter.dtype(1) == kHalf) {
argmax_kernel_cuda_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kBFloat16) {
argmax_kernel_cuda_impl<at::BFloat16, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmax_cuda", [&]() {
argmax_kernel_cuda_impl<scalar_t>(iter);
});
}
}
void argmin_kernel_cuda(TensorIterator& iter) {
// For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,
// we can convert float16 & bfloat16 to float and do all the operations in float.
if (iter.dtype(1) == kHalf) {
argmin_kernel_cuda_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kBFloat16) {
argmin_kernel_cuda_impl<at::BFloat16, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmin_cuda", [&]() {
argmin_kernel_cuda_impl<scalar_t>(iter);
});
}
}
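// Dimension-wise min/max: values and indices are reduced together, seeded with
// the type's upper/lower bound and index 0.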
static void min_kernel_impl(const Tensor& result, const Tensor& indice, const Tensor& self, int64_t dim, bool keepdim) {
auto iter = meta::make_reduction(self, result, indice, dim, keepdim, self.scalar_type(), kLong);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(2), "min_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}
static void max_kernel_impl(const Tensor& result, const Tensor& indice, const Tensor& self, int64_t dim, bool keepdim) {
auto iter = meta::make_reduction(self, result, indice, dim, keepdim, self.scalar_type(), kLong);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(2), "max_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MaxOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::lower_bound(), 0));
});
}
static void aminmax_kernel_impl(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& min_result,
Tensor& max_result) {
at::TensorIterator iter = make_reduction("aminmax_cuda", min_result,
max_result, self, dim, keepdim, self.scalar_type());
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, self.scalar_type(), "aminmax_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinMaxOps<scalar_t, scalar_t, int32_t>{},
thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()
)
);
});
}
static void min_all_kernel_impl(Tensor& result, const Tensor& input) {
auto dtype = input.scalar_type();
auto iter = make_reduction("min_all", result, input, std::vector<int64_t>{}, false, dtype);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "min_all_cuda", [&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
static void max_all_kernel_impl(Tensor& result, const Tensor& input) {
auto dtype = input.scalar_type();
auto iter = make_reduction("max_all", result, input, std::vector<int64_t>{}, false, dtype);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "max_all_cuda", [&] {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
template <typename scalar_t>
void _min_max_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()
));
}
void aminmax_allreduce_kernel_impl(const Tensor& input, Tensor& min_result, Tensor& max_result) {
auto dtype = input.scalar_type();
auto iter = make_reduction("aminmax_cuda", min_result, max_result, input,
std::vector<int64_t>{}, false, dtype);
TORCH_CHECK(iter.numel() > 0, "min_max on a tensor with no elements is not defined.");
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "aminmax_all_cuda", [&] {
_min_max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda);
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda);
REGISTER_DISPATCH(argmax_stub, &argmax_kernel_cuda);
REGISTER_DISPATCH(argmin_stub, &argmin_kernel_cuda);
REGISTER_DISPATCH(min_stub, &min_kernel_impl);
REGISTER_DISPATCH(max_stub, &max_kernel_impl);
REGISTER_DISPATCH(aminmax_stub, &aminmax_kernel_impl);
REGISTER_DISPATCH(min_all_stub, &min_all_kernel_impl);
REGISTER_DISPATCH(max_all_stub, &max_all_kernel_impl);
REGISTER_DISPATCH(aminmax_allreduce_stub, &aminmax_allreduce_kernel_impl);
}} // namespace at::native
| 0b9a47f45f9a6339648083c2d329c60cfe8ac1b6.cu | #include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THCNumerics.cuh>
#include <ATen/native/ReduceOps.h>
#include<ATen/native/ReduceAllOps.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorCompare.h>
namespace at { namespace native {
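// NaN-propagating comparators: if either operand is NaN the reduction keeps
// it, so max/min over a tensor that contains NaN returns NaN.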
template <typename acc_t>
struct MaxNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (THCNumerics<acc_t>::isnan(a) || a > b) ? a : b;
}
};
template <typename scalar_t, typename acc_t=scalar_t>
void max_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MaxNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::lower_bound());
}
template <typename acc_t>
struct MinNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (THCNumerics<acc_t>::isnan(a) || a < b) ? a : b;
}
};
template <typename scalar_t, typename acc_t=scalar_t>
void min_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::upper_bound());
}
void max_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cuda", [&]() {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
template <typename scalar_t, typename acc_t=scalar_t>
void argmax_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, int64_t>(
iter,
ArgMaxOps<acc_t>{},
thrust::pair<acc_t, int64_t>(at::numeric_limits<acc_t>::lower_bound(), 0));
};
template <typename scalar_t, typename acc_t=scalar_t>
void argmin_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, int64_t>(
iter,
ArgMinOps<acc_t>{},
thrust::pair<acc_t, int64_t>(at::numeric_limits<acc_t>::upper_bound(), 0));
};
void argmax_kernel_cuda(TensorIterator& iter) {
// For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,
// we can convert float16 & bfloat16 to float and do all the operations in float.
if (iter.dtype(1) == kHalf) {
argmax_kernel_cuda_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kBFloat16) {
argmax_kernel_cuda_impl<at::BFloat16, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmax_cuda", [&]() {
argmax_kernel_cuda_impl<scalar_t>(iter);
});
}
}
void argmin_kernel_cuda(TensorIterator& iter) {
// For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,
// we can convert float16 & bfloat16 to float and do all the operations in float.
if (iter.dtype(1) == kHalf) {
argmin_kernel_cuda_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kBFloat16) {
argmin_kernel_cuda_impl<at::BFloat16, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmin_cuda", [&]() {
argmin_kernel_cuda_impl<scalar_t>(iter);
});
}
}
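// Dimension-wise min/max: values and indices are reduced together, seeded with
// the type's upper/lower bound and index 0.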
static void min_kernel_impl(const Tensor& result, const Tensor& indice, const Tensor& self, int64_t dim, bool keepdim) {
auto iter = meta::make_reduction(self, result, indice, dim, keepdim, self.scalar_type(), kLong);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(2), "min_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}
static void max_kernel_impl(const Tensor& result, const Tensor& indice, const Tensor& self, int64_t dim, bool keepdim) {
auto iter = meta::make_reduction(self, result, indice, dim, keepdim, self.scalar_type(), kLong);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(2), "max_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MaxOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::lower_bound(), 0));
});
}
static void aminmax_kernel_impl(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& min_result,
Tensor& max_result) {
at::TensorIterator iter = make_reduction("aminmax_cuda", min_result,
max_result, self, dim, keepdim, self.scalar_type());
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, self.scalar_type(), "aminmax_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinMaxOps<scalar_t, scalar_t, int32_t>{},
thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()
)
);
});
}
static void min_all_kernel_impl(Tensor& result, const Tensor& input) {
auto dtype = input.scalar_type();
auto iter = make_reduction("min_all", result, input, std::vector<int64_t>{}, false, dtype);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "min_all_cuda", [&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
static void max_all_kernel_impl(Tensor& result, const Tensor& input) {
auto dtype = input.scalar_type();
auto iter = make_reduction("max_all", result, input, std::vector<int64_t>{}, false, dtype);
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "max_all_cuda", [&] {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
template <typename scalar_t>
void _min_max_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()
));
}
void aminmax_allreduce_kernel_impl(const Tensor& input, Tensor& min_result, Tensor& max_result) {
auto dtype = input.scalar_type();
auto iter = make_reduction("aminmax_cuda", min_result, max_result, input,
std::vector<int64_t>{}, false, dtype);
TORCH_CHECK(iter.numel() > 0, "min_max on a tensor with no elements is not defined.");
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "aminmax_all_cuda", [&] {
_min_max_values_kernel_cuda_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda);
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda);
REGISTER_DISPATCH(argmax_stub, &argmax_kernel_cuda);
REGISTER_DISPATCH(argmin_stub, &argmin_kernel_cuda);
REGISTER_DISPATCH(min_stub, &min_kernel_impl);
REGISTER_DISPATCH(max_stub, &max_kernel_impl);
REGISTER_DISPATCH(aminmax_stub, &aminmax_kernel_impl);
REGISTER_DISPATCH(min_all_stub, &min_all_kernel_impl);
REGISTER_DISPATCH(max_all_stub, &max_all_kernel_impl);
REGISTER_DISPATCH(aminmax_allreduce_stub, &aminmax_allreduce_kernel_impl);
}} // namespace at::native
|
d440bb3a1b38f53373322aed2af60f017613d326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \brief Demonstrate how HyperQ allows supporting devices to avoid false
* dependencies between kernels in different streams.
*/
#include "cuda_util.h"
// This subroutine does no real work but runs for at least the specified number
// of clock ticks.
__global__ void KernelA(clock_t *d_o, clock_t clock_count) {
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count) {
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
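// Launches nstreams copies of the spin kernel, first back-to-back in the
// default stream and then one per stream; on devices with HyperQ the
// per-stream launches can overlap almost completely.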
void ExperimentA(hipStream_t *streams, int nstreams) {
clock_t time_clocks = 10000000;
cjmcv_cuda_util::GpuTimer timer;
// Allocate device memory for the output (one value for each kernel)
clock_t *d_a = 0;
CUDA_CHECK(hipMalloc((void **)&d_a, nstreams * sizeof(clock_t)));
// Warm up.
KernelA << <1, 1 >> > (&d_a[0], time_clocks);
// Get the running time of KernelA.
float kernel_time = 0;
timer.Start();
KernelA << <1, 1 >> > (&d_a[0], time_clocks);
timer.Stop();
kernel_time = timer.ElapsedMillis();
// Executed in the same stream.
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelA << <1, 1 >> > (&d_a[i], time_clocks);
}
timer.Stop();
printf("<In the same stream> Measured time for sample = %.3fs\n", timer.ElapsedMillis() / 1000.0f);
// Executed in separate streams.
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelA << <1, 1, 0, streams[i] >> > (&d_a[i], time_clocks);
}
timer.Stop();
printf("<In separate streams> Measured time for sample = %.3fs\n", timer.ElapsedMillis() / 1000.0f);
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs\n", nstreams, nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, kernel_time / 1000.0f);
CUDA_CHECK(hipFree(d_a));
}
bool VerifyOutput(int *d_a, int len, int value) {
int *h_a = (int *)malloc(sizeof(int) * len);
CUDA_CHECK(hipMemcpy(h_a, d_a, sizeof(int) * len, hipMemcpyDeviceToHost));
bool is_pass = true;
for (int i = 0; i < len; i++) {
//printf("%d, ", h_a[i]);
if (h_a[i] != value)
is_pass = false;
}
if (h_a) free(h_a);
return is_pass;
}
__global__ void KernelB(int *data, const int len) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len; i += blockDim.x * gridDim.x) {
data[i] += 1;
}
}
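// Same streamed-vs-serial comparison with a memory-bound kernel; test_flag
// picks the grid size (fill the device, a few blocks, or a single thread),
// which controls how much the streams can actually overlap.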
void ExperimentB(hipStream_t *streams, int nstreams, int test_flag) {
int len = 0;
int threads_per_block = 0;
int blocks_per_grid = 0;
if (test_flag == 0) {
len = 50000000;
threads_per_block = 1024;
blocks_per_grid = (len + threads_per_block - 1) / threads_per_block; // roughly 48,000+ blocks
}
else if (test_flag == 1) {
len = 50000000;
threads_per_block = 512;
blocks_per_grid = 3;
}
else {
len = 50000;
threads_per_block = 1;
blocks_per_grid = 1;
}
cjmcv_cuda_util::GpuTimer timer;
int *d_a = 0;
CUDA_CHECK(hipMalloc((void **)&d_a, sizeof(int)*len));
CUDA_CHECK(hipMemset(d_a, 0, sizeof(float)*len));
// Warm up.
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
// Get the running time of KernelB.
float kernel_time = 0;
timer.Start();
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
timer.Stop();
kernel_time = timer.ElapsedMillis();
// Executed in the same stream.
CUDA_CHECK(hipMemset(d_a, 0, sizeof(float)*len));
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
}
timer.Stop();
printf("<In the same stream> Measured time for sample = %.3fs, %s\n",
timer.ElapsedMillis() / 1000.0f, (VerifyOutput(d_a, len, nstreams) ? "PASS" : "NOT")); // NOT, means it is not a serial execution.
// Executed in separate streams.
CUDA_CHECK(hipMemset(d_a, 0, sizeof(float)*len));
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelB << <blocks_per_grid, threads_per_block, 0, streams[i] >> > (d_a, len);
}
timer.Stop();
printf("<In separate streams> Measured time for sample = %.3fs, %s\n",
timer.ElapsedMillis() / 1000.0f, (VerifyOutput(d_a, len, nstreams) ? "PASS" : "NOT")); // NOT, means it is not a serial execution.
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs\n", nstreams, nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, kernel_time / 1000.0f);
if (d_a) CUDA_CHECK(hipFree(d_a));
}
int main(int argc, char **argv) {
// HyperQ is available in devices of Compute Capability 3.5 and higher
int device_id = 0;
int ret = cjmcv_cuda_util::InitEnvironment(device_id);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
int nstreams = 32;
// Allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *)malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++) {
CUDA_CHECK(hipStreamCreate(&(streams[i])));
}
printf("Experiment A: \n");
ExperimentA(streams, nstreams);
// The speedup from HyperQ depends on how many resources each kernel launch occupies.
// If a single kernel call occupies all of the device's resources, HyperQ cannot overlap the launches.
// Refer to ExperimentB_0, which uses the device's full resources in a single kernel call.
// Close to serial execution.
printf("\n\nExperiment B_0: \n");
ExperimentB(streams, nstreams, 0);
// Only part of it accelerated.
printf("\n\nExperiment B_1: \n");
ExperimentB(streams, nstreams, 1);
// Almost fully concurrent execution.
printf("\n\nExperiment B_2: \n");
ExperimentB(streams, nstreams, 2);
// Release resources
for (int i = 0; i < nstreams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
if (streams) free(streams);
cjmcv_cuda_util::CleanUpEnvironment();
}
| d440bb3a1b38f53373322aed2af60f017613d326.cu | /*!
* \brief Demonstrate how HyperQ allows supporting devices to avoid false
* dependencies between kernels in different streams.
*/
#include "cuda_util.h"
// This subroutine does no real work but runs for at least the specified number
// of clock ticks.
__global__ void KernelA(clock_t *d_o, clock_t clock_count) {
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count) {
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
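// Launches nstreams copies of the spin kernel, first back-to-back in the
// default stream and then one per stream; on devices with HyperQ the
// per-stream launches can overlap almost completely.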
void ExperimentA(cudaStream_t *streams, int nstreams) {
clock_t time_clocks = 10000000;
cjmcv_cuda_util::GpuTimer timer;
// Allocate device memory for the output (one value for each kernel)
clock_t *d_a = 0;
CUDA_CHECK(cudaMalloc((void **)&d_a, nstreams * sizeof(clock_t)));
// Warm up.
KernelA << <1, 1 >> > (&d_a[0], time_clocks);
// Get the running time of KernelA.
float kernel_time = 0;
timer.Start();
KernelA << <1, 1 >> > (&d_a[0], time_clocks);
timer.Stop();
kernel_time = timer.ElapsedMillis();
// Executed in the same stream.
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelA << <1, 1 >> > (&d_a[i], time_clocks);
}
timer.Stop();
printf("<In the same stream> Measured time for sample = %.3fs\n", timer.ElapsedMillis() / 1000.0f);
// Executed in separate streams.
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelA << <1, 1, 0, streams[i] >> > (&d_a[i], time_clocks);
}
timer.Stop();
printf("<In separate streams> Measured time for sample = %.3fs\n", timer.ElapsedMillis() / 1000.0f);
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs\n", nstreams, nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, kernel_time / 1000.0f);
CUDA_CHECK(cudaFree(d_a));
}
bool VerifyOutput(int *d_a, int len, int value) {
int *h_a = (int *)malloc(sizeof(int) * len);
CUDA_CHECK(cudaMemcpy(h_a, d_a, sizeof(int) * len, cudaMemcpyDeviceToHost));
bool is_pass = true;
for (int i = 0; i < len; i++) {
//printf("%d, ", h_a[i]);
if (h_a[i] != value)
is_pass = false;
}
if (h_a) free(h_a);
return is_pass;
}
__global__ void KernelB(int *data, const int len) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len; i += blockDim.x * gridDim.x) {
data[i] += 1;
}
}
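// Same streamed-vs-serial comparison with a memory-bound kernel; test_flag
// picks the grid size (fill the device, a few blocks, or a single thread),
// which controls how much the streams can actually overlap.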
void ExperimentB(cudaStream_t *streams, int nstreams, int test_flag) {
int len = 0;
int threads_per_block = 0;
int blocks_per_grid = 0;
if (test_flag == 0) {
len = 50000000;
threads_per_block = 1024;
blocks_per_grid = (len + threads_per_block - 1) / threads_per_block; // roughly 48,000+ blocks
}
else if (test_flag == 1) {
len = 50000000;
threads_per_block = 512;
blocks_per_grid = 3;
}
else {
len = 50000;
threads_per_block = 1;
blocks_per_grid = 1;
}
cjmcv_cuda_util::GpuTimer timer;
int *d_a = 0;
CUDA_CHECK(cudaMalloc((void **)&d_a, sizeof(int)*len));
CUDA_CHECK(cudaMemset(d_a, 0, sizeof(float)*len));
// Warm up.
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
// Get the running time of KernelB.
float kernel_time = 0;
timer.Start();
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
timer.Stop();
kernel_time = timer.ElapsedMillis();
// Executed in the same stream.
CUDA_CHECK(cudaMemset(d_a, 0, sizeof(float)*len));
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelB << <blocks_per_grid, threads_per_block >> > (d_a, len);
}
timer.Stop();
printf("<In the same stream> Measured time for sample = %.3fs, %s\n",
timer.ElapsedMillis() / 1000.0f, (VerifyOutput(d_a, len, nstreams) ? "PASS" : "NOT")); // NOT, means it is not a serial execution.
// Executed in separate streams.
CUDA_CHECK(cudaMemset(d_a, 0, sizeof(float)*len));
timer.Start();
for (int i = 0; i < nstreams; ++i) {
KernelB << <blocks_per_grid, threads_per_block, 0, streams[i] >> > (d_a, len);
}
timer.Stop();
printf("<In separate streams> Measured time for sample = %.3fs, %s\n",
timer.ElapsedMillis() / 1000.0f, (VerifyOutput(d_a, len, nstreams) ? "PASS" : "NOT")); // NOT, means it is not a serial execution.
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs\n", nstreams, nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, kernel_time / 1000.0f);
if (d_a) CUDA_CHECK(cudaFree(d_a));
}
int main(int argc, char **argv) {
// HyperQ is available in devices of Compute Capability 3.5 and higher
int device_id = 0;
int ret = cjmcv_cuda_util::InitEnvironment(device_id);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
int nstreams = 32;
// Allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *)malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++) {
CUDA_CHECK(cudaStreamCreate(&(streams[i])));
}
printf("Experiment A: \n");
ExperimentA(streams, nstreams);
// The speedup from HyperQ depends on how many resources each kernel launch occupies.
// If a single kernel call occupies all of the device's resources, HyperQ cannot overlap the launches.
// Refer to ExperimentB_0, which uses the device's full resources in a single kernel call.
// Close to serial execution.
printf("\n\nExperiment B_0: \n");
ExperimentB(streams, nstreams, 0);
// Only part of it accelerated.
printf("\n\nExperiment B_1: \n");
ExperimentB(streams, nstreams, 1);
// Almost fully concurrent execution.
printf("\n\nExperiment B_2: \n");
ExperimentB(streams, nstreams, 2);
// Release resources
for (int i = 0; i < nstreams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
if (streams) free(streams);
cjmcv_cuda_util::CleanUpEnvironment();
}
|
595e6ea829613addc4162055d57fdc71968eced4.hip | // !!! This is a file automatically generated by hipify!!!
#include "opencv2/gpu/device/common.hpp"
#include <opencv2/core/core.hpp>
using namespace cv::gpu;
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../Matting/matting.h"
#if HAVE_GPU ==1
#define BLOCK_WIDE 64
#define BLOCK_HIGH 8
#define alpha_top 40
#define alpha_bottom 40
#define alpha_left 120
#define alpha_right 120
namespace cv {
namespace gpu {
namespace device {
/*__device__ const float motion_TH_f = motion_TH / 255.0;
__device__ const float static_SPEED_f = static_SPEED / 255.0;
__device__ const float long_SPEED_f = long_SPEED / 255.0;
__device__ const float luma_offset_f = luma_offset / 255.0f;
__device__ const float u_gain_f = u_gain;
__device__ const float v_gain_f = v_gain;*/
__constant__ TuningParaFloat Const;
__constant__ HostPara host_para;
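// Background tracking: each thread loads one luma value into shared memory,
// estimates local edge strength from the diagonal neighbours, counts how long
// the pixel has stayed static, and steps the stored background along
// bg_diff_yuv at a speed determined by that history.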
__global__ void trace_bg_kernel(PtrStepSz<float> motion_diff_rgb_filted0, PtrStepSz<float> motion_diff_rgb_filted1, PtrStepSz<float> motion_diff_rgb_filted2,
PtrStepSz<float3> frame_yuv, PtrStepSz<float3> bg_yuv, PtrStepSz<float3> bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body)
{
extern __shared__ float smem[];
typename float * gray = smem;
unsigned int gray_idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * (blockDim.y-2) + threadIdx.y;
unsigned int x = blockIdx.x * (blockDim.x-2) + threadIdx.x;
if (y < static_num.rows && x < static_num.cols) {
gray[gray_idx] = frame_yuv.ptr(y)[x].x;
__syncthreads();
if (threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != blockDim.x - 1 && threadIdx.y != blockDim.y - 1
&& y + 1<static_num.rows && x + 1<static_num.cols) {
float edge_offset = MAX(fabs(gray[gray_idx - blockDim.x - 1] - gray[gray_idx + blockDim.x + 1]),
fabs(gray[gray_idx - blockDim.x + 1] - gray[gray_idx + blockDim.x - 1])) / 2;
float motion_diff = fabs(motion_diff_rgb_filted0.ptr(y)[x]) + fabs(motion_diff_rgb_filted1.ptr(y)[x]) + fabs(motion_diff_rgb_filted2.ptr(y)[x]);
unsigned char static_num_reg = static_num.ptr(y)[x];
if (motion_diff < edge_offset + Const.motion_TH_f)
static_num_reg = MIN(static_num_reg + 1, Const.static_MAX);
else
static_num_reg = 0;
static_num.ptr(y)[x] = static_num_reg;
float3 bg_yuv_reg = bg_yuv.ptr(y)[x];
if (fabs(bg_yuv_reg.x) <= 0.001f && fabs(bg_yuv_reg.y - 1.0f) <= 0.001f && fabs(bg_yuv_reg.z) <=0.001f) {
if (static_num_reg>= Const.init_static_num)
bg_yuv.ptr(y)[x] = frame_yuv.ptr(y)[x];
}
else {
float update_speed;
if (is_bg.ptr(y)[x] && static_num_reg >= Const.static_NUM)
update_speed = Const.static_SPEED_f;
else if (is_body.ptr(y)[x] == 0 && static_num_reg >= Const.long_static_NUM)
update_speed = Const.long_SPEED_f;
else
update_speed = 0;
float3 bg_diff_yuv_reg = bg_diff_yuv.ptr(y)[x];
bg_yuv_reg.x = (bg_diff_yuv_reg.x > 0) ? (bg_yuv_reg.x + update_speed) : (bg_yuv_reg.x - update_speed);
bg_yuv_reg.y = (bg_diff_yuv_reg.y > 0) ? (bg_yuv_reg.y + update_speed) : (bg_yuv_reg.y - update_speed);
bg_yuv_reg.z = (bg_diff_yuv_reg.z > 0) ? (bg_yuv_reg.z + update_speed) : (bg_yuv_reg.z - update_speed);
bg_yuv.ptr(y)[x] = bg_yuv_reg;
}
}
}
}
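// Foreground masks: weights the filtered background difference per channel,
// halves the threshold inside the configured body window, and marks pixels as
// sure (fg_sure) or possible (fg_maybe) foreground.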
__global__ void update_mask_bg_kernel(PtrStepSz<float> bg_diff_filted0, PtrStepSz<float> bg_diff_filted1, PtrStepSz<float> bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body)
{
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y + alpha_top;
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x + alpha_left;
float bg_diff_abs_y = fabs(bg_diff_filted0.ptr(y)[x]);
float bg_diff_abs_u = fabs(bg_diff_filted1.ptr(y)[x]);
float bg_diff_abs_v = fabs(bg_diff_filted2.ptr(y)[x]);
bg_diff_abs_y = MAX(0.0f, bg_diff_abs_y - Const.luma_offset_f);
bg_diff_abs_u = bg_diff_abs_u * Const.u_gain_f;
bg_diff_abs_v = bg_diff_abs_v * Const.v_gain_f;
float bg_diff_all = (bg_diff_abs_y + bg_diff_abs_u + bg_diff_abs_v)*(fg_sure.ptr(y)[x] + 1);
float motion_th = Const.alpha_TH_f;
if ((y >= host_para.body_top - 1) && (y <= host_para.body_bottom - 1) && (x >= host_para.body_left - 1) && (x <= host_para.body_right - 1)) {
is_body.ptr(y)[x] = 1;
motion_th = Const.alpha_TH_f / 2;
} else
is_body.ptr(y)[x] = 0;
if (bg_diff_all > motion_th * 2) {
fg_sure.ptr(y)[x] = 255;
fg_maybe.ptr(y)[x] = 255;
}
else {
fg_sure.ptr(y)[x] = 0;
if (bg_diff_all > motion_th)
fg_maybe.ptr(y)[x] = 255;
else
fg_maybe.ptr(y)[x] = 0;
}
}
void trace_bg_(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2,
PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, hipStream_t stream)
{
const dim3 block(BLOCK_WIDE, BLOCK_HIGH);
const dim3 grid(divUp(frame_yuv.cols - 2, BLOCK_WIDE - 2), divUp(frame_yuv.rows - 2, BLOCK_HIGH - 2));
const size_t smemSize = BLOCK_WIDE * BLOCK_HIGH * sizeof(float);
trace_bg_kernel<< <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(motion_diff_rgb_filted0), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted1), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted2),
static_cast<PtrStepSz<float3>>(frame_yuv), static_cast<PtrStepSz<float3>>(bg_yuv), static_cast<PtrStepSz<float3>>(bg_diff_yuv), static_num, is_bg, is_body);
}
void update_mask_bg_(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, hipStream_t stream)
{
const dim3 block(BLOCK_WIDE, BLOCK_HIGH);
const dim3 grid(divUp(fg_sure.cols - alpha_left - alpha_right, BLOCK_WIDE), divUp(fg_sure.rows - alpha_top - alpha_bottom, BLOCK_HIGH));
const size_t smemSize = 0;
update_mask_bg_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(bg_diff_filted0), static_cast<PtrStepSz<float>>(bg_diff_filted1), static_cast<PtrStepSz<float>>(bg_diff_filted2),
fg_sure, fg_maybe, is_body);
}
}
}
}
void trace_bg(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2,
PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, hipStream_t stream)
{
CV_Assert(motion_diff_rgb_filted0.cols==is_bg.cols && frame_yuv.cols==is_bg.cols && bg_yuv.cols==is_bg.cols && bg_diff_yuv.cols==is_bg.cols
&& static_num.cols==is_bg.cols && is_body.cols==is_bg.cols);
CV_Assert(motion_diff_rgb_filted0.rows==is_bg.rows && frame_yuv.rows==is_bg.rows && bg_yuv.rows==is_bg.rows && bg_diff_yuv.rows==is_bg.rows
&& static_num.rows==is_bg.rows && is_body.rows==is_bg.rows);
device::trace_bg_(motion_diff_rgb_filted0, motion_diff_rgb_filted1, motion_diff_rgb_filted2, frame_yuv, bg_yuv,
bg_diff_yuv, static_num, is_bg, is_body, stream);
}
void update_mask_bg(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, hipStream_t stream)
{
CV_Assert(bg_diff_filted0.cols==is_body.cols && bg_diff_filted1.cols==is_body.cols && bg_diff_filted2.cols==is_body.cols
&& fg_sure.cols==is_body.cols && fg_maybe.cols==is_body.cols);
CV_Assert(bg_diff_filted0.rows==is_body.rows && bg_diff_filted1.rows==is_body.rows && bg_diff_filted2.rows==is_body.rows
&& fg_sure.rows==is_body.rows && fg_maybe.rows==is_body.rows);
device::update_mask_bg_(bg_diff_filted0, bg_diff_filted1, bg_diff_filted2, fg_sure, fg_maybe, is_body, stream);
}
void tune_gpu_parameter(TuningParaFloat *c)
{
checkCudaErrors(hipMemcpyToSymbol(device::Const, c, sizeof(TuningParaFloat)));
}
void update_host_para(HostPara *p)
{
checkCudaErrors(hipMemcpyToSymbol(device::host_para, p, sizeof(HostPara)));
}
#endif | 595e6ea829613addc4162055d57fdc71968eced4.cu | #include "opencv2/gpu/device/common.hpp"
#include <opencv2/core/core.hpp>
using namespace cv::gpu;
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../Matting/matting.h"
#if HAVE_GPU ==1
#define BLOCK_WIDE 64
#define BLOCK_HIGH 8
#define alpha_top 40
#define alpha_bottom 40
#define alpha_left 120
#define alpha_right 120
namespace cv {
namespace gpu {
namespace device {
/*__device__ const float motion_TH_f = motion_TH / 255.0;
__device__ const float static_SPEED_f = static_SPEED / 255.0;
__device__ const float long_SPEED_f = long_SPEED / 255.0;
__device__ const float luma_offset_f = luma_offset / 255.0f;
__device__ const float u_gain_f = u_gain;
__device__ const float v_gain_f = v_gain;*/
__constant__ TuningParaFloat Const;
__constant__ HostPara host_para;
__global__ void trace_bg_kernel(PtrStepSz<float> motion_diff_rgb_filted0, PtrStepSz<float> motion_diff_rgb_filted1, PtrStepSz<float> motion_diff_rgb_filted2,
PtrStepSz<float3> frame_yuv, PtrStepSz<float3> bg_yuv, PtrStepSz<float3> bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body)
{
extern __shared__ float smem[];
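// The dynamic shared-memory buffer holds one BLOCK_WIDE x BLOCK_HIGH tile of luma;
// the outer ring of threads only loads the halo, interior threads update their own pixel.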
float* gray = smem;
unsigned int gray_idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * (blockDim.y-2) + threadIdx.y;
unsigned int x = blockIdx.x * (blockDim.x-2) + threadIdx.x;
if (y < static_num.rows && x < static_num.cols) {
gray[gray_idx] = frame_yuv.ptr(y)[x].x;
__syncthreads();
if (threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != blockDim.x - 1 && threadIdx.y != blockDim.y - 1
&& y + 1<static_num.rows && x + 1<static_num.cols) {
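// Local edge strength from the two diagonal luma gradients; it raises the motion threshold near edges.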
float edge_offset = MAX(fabs(gray[gray_idx - blockDim.x - 1] - gray[gray_idx + blockDim.x + 1]),
fabs(gray[gray_idx - blockDim.x + 1] - gray[gray_idx + blockDim.x - 1])) / 2;
float motion_diff = fabs(motion_diff_rgb_filted0.ptr(y)[x]) + fabs(motion_diff_rgb_filted1.ptr(y)[x]) + fabs(motion_diff_rgb_filted2.ptr(y)[x]);
unsigned char static_num_reg = static_num.ptr(y)[x];
if (motion_diff < edge_offset + Const.motion_TH_f)
static_num_reg = MIN(static_num_reg + 1, Const.static_MAX);
else
static_num_reg = 0;
static_num.ptr(y)[x] = static_num_reg;
float3 bg_yuv_reg = bg_yuv.ptr(y)[x];
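// A background value of (0, 1, 0) marks an uninitialized pixel: seed it from the current frame once it has stayed static long enough.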
if (fabs(bg_yuv_reg.x) <= 0.001f && fabs(bg_yuv_reg.y - 1.0f) <= 0.001f && fabs(bg_yuv_reg.z) <=0.001f) {
if (static_num_reg>= Const.init_static_num)
bg_yuv.ptr(y)[x] = frame_yuv.ptr(y)[x];
}
else {
float update_speed;
if (is_bg.ptr(y)[x] && static_num_reg >= Const.static_NUM)
update_speed = Const.static_SPEED_f;
else if (is_body.ptr(y)[x] == 0 && static_num_reg >= Const.long_static_NUM)
update_speed = Const.long_SPEED_f;
else
update_speed = 0;
float3 bg_diff_yuv_reg = bg_diff_yuv.ptr(y)[x];
bg_yuv_reg.x = (bg_diff_yuv_reg.x > 0) ? (bg_yuv_reg.x + update_speed) : (bg_yuv_reg.x - update_speed);
bg_yuv_reg.y = (bg_diff_yuv_reg.y > 0) ? (bg_yuv_reg.y + update_speed) : (bg_yuv_reg.y - update_speed);
bg_yuv_reg.z = (bg_diff_yuv_reg.z > 0) ? (bg_yuv_reg.z + update_speed) : (bg_yuv_reg.z - update_speed);
bg_yuv.ptr(y)[x] = bg_yuv_reg;
}
}
}
}
__global__ void update_mask_bg_kernel(PtrStepSz<float> bg_diff_filted0, PtrStepSz<float> bg_diff_filted1, PtrStepSz<float> bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body)
{
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y + alpha_top;
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x + alpha_left;
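// The alpha_* offsets above skip the frame's border margins; only interior pixels are classified.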
float bg_diff_abs_y = fabs(bg_diff_filted0.ptr(y)[x]);
float bg_diff_abs_u = fabs(bg_diff_filted1.ptr(y)[x]);
float bg_diff_abs_v = fabs(bg_diff_filted2.ptr(y)[x]);
bg_diff_abs_y = MAX(0.0f, bg_diff_abs_y - Const.luma_offset_f);
bg_diff_abs_u = bg_diff_abs_u * Const.u_gain_f;
bg_diff_abs_v = bg_diff_abs_v * Const.v_gain_f;
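// Pixels already marked as sure foreground (fg_sure == 255) have their difference scaled up, which biases them to stay foreground.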
float bg_diff_all = (bg_diff_abs_y + bg_diff_abs_u + bg_diff_abs_v)*(fg_sure.ptr(y)[x] + 1);
float motion_th = Const.alpha_TH_f;
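// Inside the host-specified body box, flag the pixel as body and halve the threshold.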
if ((y >= host_para.body_top - 1) && (y <= host_para.body_bottom - 1) && (x >= host_para.body_left - 1) && (x <= host_para.body_right - 1)) {
is_body.ptr(y)[x] = 1;
motion_th = Const.alpha_TH_f / 2;
} else
is_body.ptr(y)[x] = 0;
if (bg_diff_all > motion_th * 2) {
fg_sure.ptr(y)[x] = 255;
fg_maybe.ptr(y)[x] = 255;
}
else {
fg_sure.ptr(y)[x] = 0;
if (bg_diff_all > motion_th)
fg_maybe.ptr(y)[x] = 255;
else
fg_maybe.ptr(y)[x] = 0;
}
}
void trace_bg_(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2,
PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, cudaStream_t stream)
{
const dim3 block(BLOCK_WIDE, BLOCK_HIGH);
const dim3 grid(divUp(frame_yuv.cols - 2, BLOCK_WIDE - 2), divUp(frame_yuv.rows - 2, BLOCK_HIGH - 2));
const size_t smemSize = BLOCK_WIDE * BLOCK_HIGH * sizeof(float);
trace_bg_kernel<< <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(motion_diff_rgb_filted0), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted1), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted2),
static_cast<PtrStepSz<float3>>(frame_yuv), static_cast<PtrStepSz<float3>>(bg_yuv), static_cast<PtrStepSz<float3>>(bg_diff_yuv), static_num, is_bg, is_body);
}
void update_mask_bg_(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, cudaStream_t stream)
{
const dim3 block(BLOCK_WIDE, BLOCK_HIGH);
const dim3 grid(divUp(fg_sure.cols - alpha_left - alpha_right, BLOCK_WIDE), divUp(fg_sure.rows - alpha_top - alpha_bottom, BLOCK_HIGH));
const size_t smemSize = 0;
update_mask_bg_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(bg_diff_filted0), static_cast<PtrStepSz<float>>(bg_diff_filted1), static_cast<PtrStepSz<float>>(bg_diff_filted2),
fg_sure, fg_maybe, is_body);
}
}
}
}
void trace_bg(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2,
PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, cudaStream_t stream)
{
CV_Assert(motion_diff_rgb_filted0.cols==is_bg.cols && frame_yuv.cols==is_bg.cols && bg_yuv.cols==is_bg.cols && bg_diff_yuv.cols==is_bg.cols
&& static_num.cols==is_bg.cols && is_body.cols==is_bg.cols);
CV_Assert(motion_diff_rgb_filted0.rows==is_bg.rows && frame_yuv.rows==is_bg.rows && bg_yuv.rows==is_bg.rows && bg_diff_yuv.rows==is_bg.rows
&& static_num.rows==is_bg.rows && is_body.rows==is_bg.rows);
device::trace_bg_(motion_diff_rgb_filted0, motion_diff_rgb_filted1, motion_diff_rgb_filted2, frame_yuv, bg_yuv,
bg_diff_yuv, static_num, is_bg, is_body, stream);
}
void update_mask_bg(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2,
PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, cudaStream_t stream)
{
CV_Assert(bg_diff_filted0.cols==is_body.cols && bg_diff_filted1.cols==is_body.cols && bg_diff_filted2.cols==is_body.cols
&& fg_sure.cols==is_body.cols && fg_maybe.cols==is_body.cols);
CV_Assert(bg_diff_filted0.rows==is_body.rows && bg_diff_filted1.rows==is_body.rows && bg_diff_filted2.rows==is_body.rows
&& fg_sure.rows==is_body.rows && fg_maybe.rows==is_body.rows);
device::update_mask_bg_(bg_diff_filted0, bg_diff_filted1, bg_diff_filted2, fg_sure, fg_maybe, is_body, stream);
}
void tune_gpu_parameter(TuningParaFloat *c)
{
checkCudaErrors(cudaMemcpyToSymbol(device::Const, c, sizeof(TuningParaFloat)));
}
void update_host_para(HostPara *p)
{
checkCudaErrors(cudaMemcpyToSymbol(device::host_para, p, sizeof(HostPara)));
}
#endif |
e4f285dfea6502a85406a7a752fad78dcd0e28a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "RecurrentNeuralNetwork/ManageDescriptor/LibraryHandleDropoutRNN.h"
#include "RecurrentNeuralNetwork/Modules/GetWeightsAndBias.h"
#include "RecurrentNeuralNetwork/Parameters.h"
#include "RecurrentNeuralNetwork/WeightSpace.h"
#include "Tensors/ManageDescriptor/GetNDTensorDescriptorValues.h"
#include "Tensors/ManageDescriptor/TensorDescriptor.h"
#include "gtest/gtest.h"
using RecurrentNeuralNetwork::DefaultParameters;
using RecurrentNeuralNetwork::ManageDescriptor::LibraryHandleDropoutRNN;
using RecurrentNeuralNetwork::Modules::GetWeightsAndBias;
using RecurrentNeuralNetwork::WeightSpace;
using Tensors::ManageDescriptor::GetNDTensorDescriptorValues;
using Tensors::ManageDescriptor::TensorDescriptor;
namespace GoogleUnitTests
{
namespace RecurrentNeuralNetwork
{
namespace Modules
{
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, Constructs)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasWorks)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
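// Expected weight tensor is 1 x 512 x 512, so the outermost stride is 512 * 512 = 262144.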
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasWorksOnMultipleLayers)
{
DefaultParameters parameters {};
// We will demonstrate that the pseudoLayer index only goes up to number_of_layers_ for
// this setting in DefaultParameters, CUDNN_UNIDIRECTIONAL and miopenRNNRELU.
parameters.number_of_layers_ = 3;
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
// For unidirectional RNNs,
// pseudoLayer = 1 is the first hidden layer.
// pseudoLayer = 0 was the RNN input layer.
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// Second hidden layer.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more hidden layers past number_of_layers.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
3,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
// We expect to get the previous values.
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasLimitedByNumberOfLayersSpecified)
{
// We confirm that the number_of_layers (e.g. = 2 in this case) limits the
// number of pseudoLayers available.
// https://docs.nvidia.com/deeplearning/cudnn/api/index.html#cudnnSetRNNDescriptor_v8
// Recall taht numLayers or number_of_layers is an input and is the number of
// stacked, physical layers in the deep RNN model.
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
// From CUDA API, for unidirectional RNNs,
// pseudoLayer = 1 is the first hidden layer.
// pseudoLayer = 0 was the RNN input layer, but
// EY (20230806) If I specified
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
// We expect to get the previous values.
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, OutOfLimitLinearLayerIDGivesNullptr)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
2,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, LinearLayerIDOf1ForCUDNN_RNN_RELU)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more hidden layers.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No higher linear ID because only values 0, 1 allowed for miopenRNNRELU.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
2,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 0);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 0);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
} // namespace Modules
} // namespace RecurrentNeuralNetwork
} // namespace GoogleUnitTests | e4f285dfea6502a85406a7a752fad78dcd0e28a9.cu | #include "RecurrentNeuralNetwork/ManageDescriptor/LibraryHandleDropoutRNN.h"
#include "RecurrentNeuralNetwork/Modules/GetWeightsAndBias.h"
#include "RecurrentNeuralNetwork/Parameters.h"
#include "RecurrentNeuralNetwork/WeightSpace.h"
#include "Tensors/ManageDescriptor/GetNDTensorDescriptorValues.h"
#include "Tensors/ManageDescriptor/TensorDescriptor.h"
#include "gtest/gtest.h"
using RecurrentNeuralNetwork::DefaultParameters;
using RecurrentNeuralNetwork::ManageDescriptor::LibraryHandleDropoutRNN;
using RecurrentNeuralNetwork::Modules::GetWeightsAndBias;
using RecurrentNeuralNetwork::WeightSpace;
using Tensors::ManageDescriptor::GetNDTensorDescriptorValues;
using Tensors::ManageDescriptor::TensorDescriptor;
namespace GoogleUnitTests
{
namespace RecurrentNeuralNetwork
{
namespace Modules
{
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, Constructs)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasWorks)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasWorksOnMultipleLayers)
{
DefaultParameters parameters {};
// We will demonstrate that the pseudoLayer index only goes up to number_of_layers_ for
// this setting in DefaultParameters, CUDNN_UNIDIRECTIONAL and CUDNN_RNN_RELU.
parameters.number_of_layers_ = 3;
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
// For unidirectional RNNs,
// pseudoLayer = 1 is the first hidden layer.
// pseudoLayer = 0 was the RNN input layer.
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// Second hidden layer.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more hidden layers past number_of_layers.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
3,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
// We expect to get the previous values.
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, GetWeightsAndBiasLimitedByNumberOfLayersSpecified)
{
// We confirm that the number_of_layers (e.g. = 2 in this case) limits the
// number of pseudoLayers available.
// https://docs.nvidia.com/deeplearning/cudnn/api/index.html#cudnnSetRNNDescriptor_v8
// Recall that numLayers or number_of_layers is an input and is the number of
// stacked, physical layers in the deep RNN model.
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
// From CUDA API, for unidirectional RNNs,
// pseudoLayer = 1 is the first hidden layer.
// pseudoLayer = 0 was the RNN input layer, but
// EY (20230806) If I specified
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
0,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
// We expect to get the previous values.
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, OutOfLimitLinearLayerIDGivesNullptr)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
2,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
TEST(GetWeightsAndBiasTests, LinearLayerIDOf1ForCUDNN_RNN_RELU)
{
DefaultParameters parameters {};
GetWeightsAndBias get_weight_and_bias {parameters};
LibraryHandleDropoutRNN descriptors {parameters};
WeightSpace weight_space {descriptors};
TensorDescriptor weight_descriptor {};
TensorDescriptor bias_descriptor {};
auto result = get_weight_and_bias.get_weight_and_bias(
descriptors,
0,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
GetNDTensorDescriptorValues<3> get_values {};
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_TRUE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No more hidden layers.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
2,
weight_space,
1,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_NE(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_NE(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 512);
EXPECT_EQ(get_values.strides_array_[0], 262144);
EXPECT_EQ(get_values.strides_array_[1], 512);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 3);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
// No higher linear ID because only values 0, 1 allowed for CUDNN_RNN_RELU.
result = get_weight_and_bias.get_weight_and_bias(
descriptors,
1,
weight_space,
2,
weight_descriptor,
bias_descriptor);
EXPECT_FALSE(result.is_success());
EXPECT_EQ(get_weight_and_bias.weight_matrix_address_, nullptr);
EXPECT_EQ(get_weight_and_bias.bias_address_, nullptr);
result = get_values.get_values(weight_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 0);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
result = get_values.get_values(bias_descriptor, 3);
ASSERT_TRUE(result.is_success());
EXPECT_EQ(*get_values.data_type_, CUDNN_DATA_FLOAT);
EXPECT_EQ(get_values.nb_dims_[0], 0);
EXPECT_EQ(get_values.nb_dims_[1], -1);
EXPECT_EQ(get_values.nb_dims_[2], -1);
EXPECT_EQ(get_values.dimensions_array_[0], 1);
EXPECT_EQ(get_values.dimensions_array_[1], 512);
EXPECT_EQ(get_values.dimensions_array_[2], 1);
EXPECT_EQ(get_values.strides_array_[0], 512);
EXPECT_EQ(get_values.strides_array_[1], 1);
EXPECT_EQ(get_values.strides_array_[2], 1);
}
} // namespace Modules
} // namespace RecurrentNeuralNetwork
} // namespace GoogleUnitTests |
2aff7ec5c401768d1d350b63c551cf01d9d9ec2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Note: .cu file extension
#include <iostream>
#include "cudaTools.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__host__ bool helloCuda(void); //__host__ optional
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ static void kernelHello(void);
__device__ static void doSomethingHello(void);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* CUDA command summary:
* http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/index.html
*/
__host__ bool helloCuda(void) //__host__ optional
{
cout << endl << "[Hello Cuda 1]" << endl;
// Specify the number of threads: here, 1 thread in total!
dim3 dg = dim3(1, 1, 1);
dim3 db = dim3(1, 1, 1);
// Debug
//Device::print(dg, db);
Device::checkDimError(dg,db);
hipLaunchKernelGGL(( kernelHello), dim3(dg),dim3(db), 0, 0, ); // asynchronous !!
Device::checkLastCudaError("kernelHello"); // optional
return true;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
* output : void
*/
__global__ void kernelHello(void)
{
doSomethingHello();
}
/**
* Can be called only from device code;
* inlined by nvcc (the NVIDIA compiler)
*/
__device__ void doSomethingHello(void)
{
// nothing
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2aff7ec5c401768d1d350b63c551cf01d9d9ec2d.cu | // Attention : Extension .cu
#include <iostream>
#include "cudaTools.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__host__ bool helloCuda(void); //__host__ optional
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ static void kernelHello(void);
__device__ static void doSomethingHello(void);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* CUDA command summary:
* http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/index.html
*/
__host__ bool helloCuda(void) //__host__ optional
{
cout << endl << "[Hello Cuda 1]" << endl;
// Specify the number of threads: here, 1 thread in total!
dim3 dg = dim3(1, 1, 1);
dim3 db = dim3(1, 1, 1);
// Debug
//Device::print(dg, db);
Device::checkDimError(dg,db);
kernelHello<<<dg,db>>>(); // asynchronous !!
Device::checkLastCudaError("kernelHello"); // optional
return true;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
* output : void
*/
__global__ void kernelHello(void)
{
doSomethingHello();
}
/**
* Can be called only from device code;
* inlined by nvcc (the NVIDIA compiler)
*/
__device__ void doSomethingHello(void)
{
// nothing
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0cc209983cdd41bb7f4947d53acc8e9e0cdbfe03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY CORRUPTION
// srad kernel
__global__ void srad2( fp d_lambda,
int d_Nr,
int d_Nc,
long d_Ne,
int *d_iN,
int *d_iS,
int *d_jE,
int *d_jW,
fp *d_dN,
fp *d_dS,
fp *d_dE,
fp *d_dW,
fp *d_c,
fp *d_I){
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // row, y position
int col; // column, x position
// variables
fp d_cN,d_cS,d_cW,d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){
row = d_Nr - 1;
col = col - 1;
}
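// After the fix-up above, ei == row + d_Nr * col in the column-major d_Nr x d_Nc matrix.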
if(ei<d_Ne){ // make sure that only threads matching jobs run
// diffusion coefficent
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr*col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN*d_dN[ei] + d_cS*d_dS[ei] + d_cW*d_dW[ei] + d_cE*d_dE[ei];// divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] = d_I[ei] + 0.25*d_lambda*d_D; // updates image (based on input time step and divergence)
}
}
| 0cc209983cdd41bb7f4947d53acc8e9e0cdbfe03.cu | // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY CORRUPTION
// srad kernel
__global__ void srad2( fp d_lambda,
int d_Nr,
int d_Nc,
long d_Ne,
int *d_iN,
int *d_iS,
int *d_jE,
int *d_jW,
fp *d_dN,
fp *d_dS,
fp *d_dE,
fp *d_dW,
fp *d_c,
fp *d_I){
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // row, y position
int col; // column, x position
// variables
fp d_cN,d_cS,d_cW,d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){
row = d_Nr - 1;
col = col - 1;
}
if(ei<d_Ne){ // make sure that only threads matching jobs run
// diffusion coefficent
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr*col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN*d_dN[ei] + d_cS*d_dS[ei] + d_cW*d_dW[ei] + d_cE*d_dE[ei];// divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] = d_I[ei] + 0.25*d_lambda*d_D; // updates image (based on input time step and divergence)
}
}
|
4fe12c95a439a723ca8701f8406d096928965858.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transposeNoBankConflicts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
hipMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
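// Round the launch dimensions up to multiples of the block size so the grid covers the whole matrix.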
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
transposeNoBankConflicts), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
hipDeviceSynchronize();
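// Warm-up: launch the kernel 10 times before timing.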
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transposeNoBankConflicts), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
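// Time 1000 kernel launches with the host-side steady clock (no device synchronization before 'end').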
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transposeNoBankConflicts), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4fe12c95a439a723ca8701f8406d096928965858.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transposeNoBankConflicts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
cudaMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f29d6c1ab838a42159cb9d963a88b348731eb1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__initSeq2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int nrows = 1;
int ncols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
__initSeq2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,nrows,ncols);
hipDeviceSynchronize();
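// Warm-up launches before the timed loop below.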
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__initSeq2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,nrows,ncols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__initSeq2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,nrows,ncols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f29d6c1ab838a42159cb9d963a88b348731eb1bb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__initSeq2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int nrows = 1;
int ncols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__initSeq2<<<gridBlock,threadBlock>>>(A,nrows,ncols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__initSeq2<<<gridBlock,threadBlock>>>(A,nrows,ncols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__initSeq2<<<gridBlock,threadBlock>>>(A,nrows,ncols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
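// Elapsed wall-clock time for the 1000-launch loop (no device sync before 'end'), printed with the block and matrix dimensions.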
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9af923648afdaf61723a08b9baae92ee6f065b35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "magmablas.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
/*
Purpose
-------
    These are internal routines that might have many assumptions.
They are used in zgetf2_batched.cpp
No documentation is available today.
@ingroup magma_zgesv_aux
*/
#define PRECISION_z
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ magmaDoubleComplex shared_data[];
extern __shared__ double sdata[];
extern __shared__ int int_sdata[];
//////////////////////////////////////////////////////////////////////////////////////////
__device__ int
izamax_devfunc(int length, const magmaDoubleComplex *x, int incx, double *shared_x, int *shared_idx)
{
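    // Each of the first zamax threads scans elements tx, tx+zamax, tx+2*zamax, ... of x, keeping the
    // largest |Re|+|Im| and its index in shared memory; magma_getidmax then reduces those zamax
    // candidates so that shared_idx[0] holds the argmax of the whole vector.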
int tx = threadIdx.x;
magmaDoubleComplex res;
double res1;
int nchunk = magma_ceildiv( length, zamax );
if ( tx < zamax ) {
shared_x[tx] = 0.0;
        shared_idx[tx] = tx; //-1; // -1 would crash the code if the matrix is singular; better to set = tx and check info at output
}
__syncthreads();
for (int s =0; s < nchunk; s++)
{
if ( (tx + s * zamax < length) && (tx < zamax) )
{
res = x[(tx + s * zamax) * incx];
res1 = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res));
if ( res1 > shared_x[tx] )
{
shared_x[tx] = res1;
shared_idx[tx] = tx + s * zamax;
}
}
__syncthreads();
}
if (length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
izamax_kernel_batched(int length, int chunk, magmaDoubleComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
magmaDoubleComplex *x_start = x_array[blockIdx.z];
const magmaDoubleComplex *x = &(x_start[step + step * lda]);
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
double *shared_x = sdata;
int *shared_idx = (int*)(shared_x + zamax);
izamax_devfunc(length, x, incx, shared_x, shared_idx);
if (tx == 0) {
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO) {
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
tree_izamax_kernel_batched(int length, magmaDoubleComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
double** data_pool_array, magma_int_t** id_pool_array)
{
magmaDoubleComplex *x_start = x_array[blockIdx.z];
const magmaDoubleComplex *x = &(x_start[step + step * lda]);
double *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
int local_max_id;
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
x += zamax * blockIdx.x * incx;
izamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
if (tx == 0)
{
local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset
if (gridDim.x == 1)
{
ipiv[step] = local_max_id + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
}
else
{
// put each thread block local max and its index in workspace
data_pool[blockIdx.x] = shared_x[0];
id_pool[blockIdx.x] = local_max_id;
}
}
}
__global__ void
tree_izamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array)
{
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
double *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
int tx = threadIdx.x;
//read data
if ( tx < n)
{
shared_x[tx] = data_pool[tx];
shared_idx[tx] = id_pool[tx];
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = -2;
}
__syncthreads();
// compute local result inside each thread block
magma_getidmax<zamax>(tx, shared_x, shared_idx);
if (tx == 0 )
{
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
magma_int_t magma_izamax_lg_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if (length == 1) return 0;
if (incx < 0) return 1;
double* data_pool;
magma_int_t* id_pool;
double** data_pool_array = NULL;
magma_int_t** id_pool_array = NULL;
magma_int_t num_blocks = (length-1)/(zamax) + 1;
    // create pools (data and index) to store the result of each thread block
magma_dmalloc(&data_pool, num_blocks * batchCount);
magma_imalloc(&id_pool, num_blocks * batchCount);
magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
#if defined(PRECISION_z) || defined(PRECISION_d)
dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#else
sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#endif
set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue);
if ( num_blocks > zamax)
{
printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n",
int(length), int(num_blocks), int(zamax));
}
else
{
// first level tree reduction
dim3 grid(num_blocks, 1, batchCount);
dim3 threads(zamax, 1, 1);
hipLaunchKernelGGL(( tree_izamax_kernel_batched), dim3(grid), dim3(threads), 0, queue, length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
if ( num_blocks > 1)
{
// second level tree reduction
dim3 grid2(1, 1, batchCount);
hipLaunchKernelGGL(( tree_izamax_kernel2_batched), dim3(grid2), dim3(threads), 0, queue, num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
}
}
magma_free(data_pool);
magma_free(id_pool);
magma_free(data_pool_array);
magma_free(id_pool_array);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    IZAMAX finds the index of the element of maximum absolute value in x and stores the index in ipiv
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
length INTEGER
On entry, length specifies the size of vector x. length >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
the offset of ipiv
@param[in]
lda INTEGER
The leading dimension of each array A, internal use to find the starting position of x.
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
                  or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
the offset of info, internal use
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_izamax_batched(magma_int_t length,
magmaDoubleComplex **x_array, magma_int_t incx,
magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array,
magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if (length == 0 ) return 0;
dim3 grid(1, 1, batchCount);
dim3 threads(zamax, 1, 1);
#if 1
int chunk = magma_ceildiv( length, zamax );
hipLaunchKernelGGL(( izamax_kernel_batched), dim3(grid), dim3(threads), zamax * (sizeof(double) + sizeof(int)), queue ,
length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
    // the magma_izamax_lg_batched version is faster, but when CUDA launches it as 2 kernels the idle gap between these 2 kernels and the next kernel is larger than with izamax_kernel, so for now only izamax_kernel is used
if ( length <= 10 * zamax )
{
int chunk = magma_ceildiv( length, zamax );
hipLaunchKernelGGL(( izamax_kernel_batched), dim3(grid), dim3(threads), zamax * (sizeof(double) + sizeof(magma_int_t)), queue ,
length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
}
else
{
magma_izamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount);
}
#endif
return 0;
}
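// Illustrative use (hypothetical call, not copied from zgetf2_batched.cpp): to pick the pivot of
// column j of an m-by-nb panel whose per-matrix pointers are in dA_array, one might call
//     magma_izamax_batched(m-j, dA_array, 1, j, ldda, ipiv_array, info_array, gbstep, batchCount, queue);
// which stores the Fortran-indexed pivot row in ipiv[j] for every matrix of the batch.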
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zswap_kernel_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
magmaDoubleComplex *x = x_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
__shared__ int jp;
if (threadIdx.x == 0)
{
jp = ipiv[step] - 1;
//if (blockIdx.z == 1) printf("jp=%d", jp);
}
__syncthreads();
if (jp == step) return; // no pivot
int id = threadIdx.x;
if (id < n) {
magmaDoubleComplex tmp = x[jp + incx*id];
x[jp + incx*id] = x[step + incx*id];
x[step + incx*id] = tmp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    zswap swaps two rows of x: the (ipiv[step]-1)-th and the step-th
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
n INTEGER
On entry, n specifies the size of vector x. n >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_zswap_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx,
magma_int_t step, magma_int_t** ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    zswap two rows: the (ipiv[step]-1)-th and the step-th
*/
if ( n > MAX_NTHREADS)
{
printf("magma_zswap_batched nb=%d, > %d, not supported\n", int(n), int(MAX_NTHREADS) );
return -15;
}
dim3 grid(1,1, batchCount);
dim3 threads(zamax, 1, 1);
hipLaunchKernelGGL(( zswap_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, x_array, incx, step, ipiv_array);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zscal_zgeru_kernel_batched(int m, int n, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A = &(A_start[step + step * lda]);
magmaDoubleComplex *shared_y = shared_data;
int tx = threadIdx.x;
int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x;
if (tx < n) {
shared_y[tx] = A[lda * tx];
}
__syncthreads();
if (shared_y[0] == MAGMA_Z_ZERO) {
info_array[blockIdx.z] = step + gbstep + 1;
return;
}
if (gbidx < m && gbidx > 0) {
magmaDoubleComplex reg = MAGMA_Z_ZERO;
reg = A[gbidx];
reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]);
A[gbidx] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
//A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg; //cuda give wrong results with this one
//A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one
A[gbidx + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_zscal_zgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized kernel which merges the two kernels zscal and zgeru:
    1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
    2) Perform a zgeru operation for the trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if ( n == 0) return 0;
if ( n > MAX_NTHREADS)
{
printf("magma_zscal_zgeru_batched nb=%d, > %d, not supported \n", int(n), int(MAX_NTHREADS) );
return -15;
}
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
size_t shared_size = sizeof(magmaDoubleComplex)*(n);
dim3 grid(nchunk, 1, batchCount);
dim3 threads(min(m, MAX_NTHREADS), 1, 1);
hipLaunchKernelGGL(( zscal_zgeru_kernel_batched), dim3(grid), dim3(threads), shared_size, queue, m, n, step, dA_array, lda, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zgetf2trsm_kernel_batched(int ib, int n, magmaDoubleComplex **dA_array, int step, int lda)
{
/*
this kernel does the safe nonblocked TRSM operation
B = A^-1 * B
*/
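    // shared_a holds the ib-by-ib diagonal block of the factored panel and shared_b the ib-by-n block B;
    // only the strictly lower part of shared_a (implicit unit diagonal) is used, and the d-loop below is a
    // column-by-column forward substitution applied to shared_b.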
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A = &(A_start[step + step * lda]);
magmaDoubleComplex *B = &(A_start[step + (step+ib) * lda]);
magmaDoubleComplex *shared_a = shared_data;
magmaDoubleComplex *shared_b = shared_data+ib*ib;
int tid = threadIdx.x;
int i,d;
    // Read A and B together into shared memory (shared_a, shared_b);
    // note that shared_b = shared_a+ib*ib, so the storage is contiguous
    // and both can be loaded in a single loop
if ( tid < ib) {
#pragma unroll
for (i=0; i < n+ib; i++) {
shared_a[tid + i*ib] = A[tid + i*lda];
}
}
__syncthreads();
if (tid < n) {
#pragma unroll
for (d=0; d < ib-1; d++) {
for (i=d+1; i < ib; i++) {
shared_b[i+tid*ib] += (MAGMA_Z_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
}
}
}
__syncthreads();
// write back B
if ( tid < ib) {
#pragma unroll
for (i=0; i < n; i++) {
B[tid + i*lda] = shared_b[tid + i*ib];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
zgetf2trsm solves one of the matrix equations on gpu
B = C^-1 * B
where C, B are part of the matrix A in dA_array,
    This version loads C and B into shared memory, solves the system there,
    and copies the result back to GPU device memory.
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
ib INTEGER
The number of rows/columns of each matrix C, and rows of B. ib >= 0.
@param[in]
n INTEGER
The number of columns of each matrix B. n >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C" void
magma_zgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaDoubleComplex **dA_array,
magma_int_t step, magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
/*
*/
if ( n == 0 || ib == 0 ) return;
size_t shared_size = sizeof(magmaDoubleComplex)*(ib*(ib+n));
// TODO TODO TODO
if ( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra
{
printf("kernel_zgetf2trsm error out of shared memory \n");
return;
}
dim3 grid(1, 1, batchCount);
dim3 threads(max(n,ib), 1, 1);
hipLaunchKernelGGL(( zgetf2trsm_kernel_batched), dim3(grid), dim3(threads), shared_size, queue, ib, n, dA_array, step, ldda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zupdate_device(int m, int step, magmaDoubleComplex* x, int ldx, magmaDoubleComplex *A, int lda)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
int indx;
//magmaDoubleComplex reg = MAGMA_Z_ZERO;
// update the current column by all the previous one
#pragma unroll
for (int i=0; i < step; i++) {
for (int s=0; s < nchunk; s++)
{
indx = tid + s * MAX_NTHREADS;
if ( indx > i && indx < m ) {
A[indx] -= A[i] * x[indx + i*ldx];
//printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i);
}
}
__syncthreads();
}
//printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zscal5_device(int m, magmaDoubleComplex* x, magmaDoubleComplex alpha)
{
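    // Divide the m entries of x by alpha, processed in chunks of MAX_NTHREADS (one element per thread per chunk).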
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m ) {
#if 0
x[tid + s * MAX_NTHREADS] *= MAGMA_Z_DIV(MAGMA_Z_ONE, alpha);
#else
x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha;
#endif
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
int gboff = paneloffset+step;
magma_int_t *ipiv = ipiv_array[blockIdx.z];
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]);
magmaDoubleComplex *A00 = &(A_start[paneloffset + paneloffset * lda]);
magmaDoubleComplex *shared_A = shared_data;
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
__shared__ magmaDoubleComplex alpha;
int tid = threadIdx.x;
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
// read the current column from dev to shared memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
}
__syncthreads();
// update this column
if ( step > 0 ) {
zupdate_device( m, step, A00, lda, shared_A, 1);
__syncthreads();
}
    // if ( tid < (m-step) ) // DO NOT PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE.
    // So let all threads call this routine; it handles the size correctly internally.
    // note that izamax needs only 128 threads
izamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
if (tid == 0) {
ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
alpha = shared_A[shared_idx[0]+step];
//printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha);
if (shared_x[0] == MAGMA_D_ZERO) {
info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
}
}
__syncthreads();
if (shared_x[0] == MAGMA_D_ZERO) return;
__syncthreads();
    // DO NOT PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE.
zscal5_device( m-step, shared_A+step, alpha);
    // put back the pivot, which has been scaled by itself and is therefore = 1
if (tid == 0) shared_A[shared_idx[0] + step] = alpha;
__syncthreads();
// write back from shared to dev memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m )
{
A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
//printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]);
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_zcomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized kernel which merges the two kernels zscal and zgeru:
    1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
    2) Perform a zgeru operation for the trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if ( m == 0) return 0;
size_t all_shmem_size = zamax*(sizeof(double)+sizeof(int)) + (m+2)*sizeof(magmaDoubleComplex);
if ( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra
{
printf("magma_zcomputecolumn_batched error out of shared memory \n");
return -20;
}
size_t shared_size = sizeof(magmaDoubleComplex)*m;
dim3 grid(1, 1, batchCount);
dim3 threads(min(m, MAX_NTHREADS), 1, 1);
hipLaunchKernelGGL(( zcomputecolumn_kernel_shared_batched), dim3(grid), dim3(threads), shared_size, queue, m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kernel_zgetf2_sm_batched(
magma_int_t m, magma_int_t ib,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array)
{
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
magmaDoubleComplex *shared_A = shared_data;
magmaDoubleComplex *A = dA_array[blockIdx.z];
double *shared_x = (double*)(shared_A + m * ib);
int *shared_idx = (int*)(shared_x + zamax);
magmaDoubleComplex res;
int length;
__shared__ int jp;
// load data to shared memory
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
shared_A[tx + i * m] = A[tx + i * lda];
//printf("shared_A=%f ", shared_A[tx + i * m]);
}
}
__syncthreads();
for (int j=0; j < ib; j++)
{
length = m - j;
int offset = j + j*m;
//======================================
//find max
if (tx < zamax)
{
if ( tx < length)
{
res = shared_A[tx + offset];
// printf("res=%f\n", res);
shared_x[tx] = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res));
shared_idx[tx] = tx;
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = 0;
}
}
__syncthreads();
if (length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
if (tx == 0)
{
jp = shared_idx[0];
if (shared_A[jp + offset] == 0.0) printf("error, A(jp,j) == 0.0\n");
ipiv[j] = j + (jp + 1); // Fortran Indexing
//if (blockIdx.x == 1) printf("jp=%d ", jp + j + 1);
}
__syncthreads();
//======================================
if ( jp != 0) //swap
{
if (tx < ib) {
//printf("A[jp]= %f, A[j]=%f, jp=%d\n", shared_A[jp + j + tx*m], shared_A[j + tx*m], jp);
magmaDoubleComplex tmp = shared_A[jp + j + tx*m];
shared_A[jp + j + tx*m] = shared_A[j + tx*m];
shared_A[j + tx*m] = tmp;
}
}
__syncthreads();
//======================================
// Ger
if (tx < length && tx > 0)
{
res = shared_A[tx + offset];
res *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_A[0 + offset]); // scaling
shared_A[tx + offset] = res;
#pragma unroll 8
for (int i=1; i < ib-j; i++)
{
shared_A[tx + i*m + offset] += (MAGMA_Z_NEG_ONE) * shared_A[i*m + offset] * res;
//printf("res= %f, shared_A=%f\n", res, shared_A[i*m + offset]);
}
}
__syncthreads();
} // end of j
//======================================
// write back
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
A[tx + i * lda] = shared_A[tx + i * m];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZGETF2_SM computes an LU factorization of a general M-by-N matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
    This version loads the entire matrix (m*ib) into shared memory, factorizes it
    with pivoting, and copies the result back to GPU device memory.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix A. M >= 0.
@param[in]
ib INTEGER
The number of columns of each matrix A. ib >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
                  or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_zgetf2_sm_batched(
magma_int_t m, magma_int_t ib,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magma_int_t **ipiv_array,
magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
load entire matrix (m*ib) into shared memory and factorize it with pivoting and copy back.
*/
size_t shared_size = sizeof(magmaDoubleComplex) * m * ib + (zamax) * (sizeof(double) + sizeof(int)) + sizeof(int);
if (shared_size > 47000)
{
printf("Shared memory in zgetf2 = %d, exceeds 48K, kernel can not lauched succesfully\n", int(shared_size) );
return 1;
}
dim3 grid(1,1, batchCount);
dim3 threads(max(max(zamax, m), ib), 1, 1);
hipLaunchKernelGGL(( kernel_zgetf2_sm_batched), dim3(grid), dim3(threads), shared_size, queue,
m, ib, dA_array, ldda, ipiv_array, info_array);
return 0;
}
| 9af923648afdaf61723a08b9baae92ee6f065b35.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "magmablas.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
/*
Purpose
-------
    These are internal routines that might have many assumptions.
They are used in zgetf2_batched.cpp
No documentation is available today.
@ingroup magma_zgesv_aux
*/
#define PRECISION_z
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ magmaDoubleComplex shared_data[];
extern __shared__ double sdata[];
extern __shared__ int int_sdata[];
//////////////////////////////////////////////////////////////////////////////////////////
__device__ int
izamax_devfunc(int length, const magmaDoubleComplex *x, int incx, double *shared_x, int *shared_idx)
{
int tx = threadIdx.x;
magmaDoubleComplex res;
double res1;
int nchunk = magma_ceildiv( length, zamax );
if ( tx < zamax ) {
shared_x[tx] = 0.0;
        shared_idx[tx] = tx; //-1; // -1 would crash the code if the matrix is singular; better to set = tx and check info at output
}
__syncthreads();
for (int s =0; s < nchunk; s++)
{
if ( (tx + s * zamax < length) && (tx < zamax) )
{
res = x[(tx + s * zamax) * incx];
res1 = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res));
if ( res1 > shared_x[tx] )
{
shared_x[tx] = res1;
shared_idx[tx] = tx + s * zamax;
}
}
__syncthreads();
}
if (length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
izamax_kernel_batched(int length, int chunk, magmaDoubleComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
magmaDoubleComplex *x_start = x_array[blockIdx.z];
const magmaDoubleComplex *x = &(x_start[step + step * lda]);
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
double *shared_x = sdata;
int *shared_idx = (int*)(shared_x + zamax);
izamax_devfunc(length, x, incx, shared_x, shared_idx);
if (tx == 0) {
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO) {
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
tree_izamax_kernel_batched(int length, magmaDoubleComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
double** data_pool_array, magma_int_t** id_pool_array)
{
magmaDoubleComplex *x_start = x_array[blockIdx.z];
const magmaDoubleComplex *x = &(x_start[step + step * lda]);
double *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
int local_max_id;
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
x += zamax * blockIdx.x * incx;
izamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
if (tx == 0)
{
local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset
if (gridDim.x == 1)
{
ipiv[step] = local_max_id + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
}
else
{
// put each thread block local max and its index in workspace
data_pool[blockIdx.x] = shared_x[0];
id_pool[blockIdx.x] = local_max_id;
}
}
}
__global__ void
tree_izamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array)
{
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
double *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
int tx = threadIdx.x;
//read data
if ( tx < n)
{
shared_x[tx] = data_pool[tx];
shared_idx[tx] = id_pool[tx];
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = -2;
}
__syncthreads();
// compute local result inside each thread block
magma_getidmax<zamax>(tx, shared_x, shared_idx);
if (tx == 0 )
{
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if (shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
magma_int_t magma_izamax_lg_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if (length == 1) return 0;
if (incx < 0) return 1;
double* data_pool;
magma_int_t* id_pool;
double** data_pool_array = NULL;
magma_int_t** id_pool_array = NULL;
magma_int_t num_blocks = (length-1)/(zamax) + 1;
    // create pools (data and index) to store the result of each thread block
magma_dmalloc(&data_pool, num_blocks * batchCount);
magma_imalloc(&id_pool, num_blocks * batchCount);
magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
#if defined(PRECISION_z) || defined(PRECISION_d)
dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#else
sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#endif
set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue);
if ( num_blocks > zamax)
{
printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n",
int(length), int(num_blocks), int(zamax));
}
else
{
// first level tree reduction
dim3 grid(num_blocks, 1, batchCount);
dim3 threads(zamax, 1, 1);
tree_izamax_kernel_batched<<<grid, threads, 0, queue>>>(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
if ( num_blocks > 1)
{
// second level tree reduction
dim3 grid2(1, 1, batchCount);
tree_izamax_kernel2_batched<<<grid2, threads, 0, queue>>>(num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
}
}
magma_free(data_pool);
magma_free(id_pool);
magma_free(data_pool_array);
magma_free(id_pool_array);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    IZAMAX finds the index of the element of maximum absolute value in x and stores the index in ipiv
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
length INTEGER
On entry, length specifies the size of vector x. length >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
the offset of ipiv
@param[in]
lda INTEGER
The leading dimension of each array A, internal use to find the starting position of x.
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
                  or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
the offset of info, internal use
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_izamax_batched(magma_int_t length,
magmaDoubleComplex **x_array, magma_int_t incx,
magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array,
magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if (length == 0 ) return 0;
dim3 grid(1, 1, batchCount);
dim3 threads(zamax, 1, 1);
#if 1
int chunk = magma_ceildiv( length, zamax );
izamax_kernel_batched<<< grid, threads, zamax * (sizeof(double) + sizeof(int)), queue >>>
(length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
    // the magma_izamax_lg_batched version is faster, but when CUDA launches it as 2 kernels the idle gap between these 2 kernels and the next kernel is larger than with izamax_kernel, so for now only izamax_kernel is used
if ( length <= 10 * zamax )
{
int chunk = magma_ceildiv( length, zamax );
izamax_kernel_batched<<< grid, threads, zamax * (sizeof(double) + sizeof(magma_int_t)), queue >>>
(length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
}
else
{
magma_izamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount);
}
#endif
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zswap_kernel_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
magmaDoubleComplex *x = x_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
__shared__ int jp;
if (threadIdx.x == 0)
{
jp = ipiv[step] - 1;
//if (blockIdx.z == 1) printf("jp=%d", jp);
}
__syncthreads();
if (jp == step) return; // no pivot
int id = threadIdx.x;
if (id < n) {
magmaDoubleComplex tmp = x[jp + incx*id];
x[jp + incx*id] = x[step + incx*id];
x[step + incx*id] = tmp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    zswap swaps two rows of x: the (ipiv[step]-1)-th and the step-th
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
n INTEGER
On entry, n specifies the size of vector x. n >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_zswap_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx,
magma_int_t step, magma_int_t** ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    zswap two rows: the (ipiv[step]-1)-th and the step-th
*/
if ( n > MAX_NTHREADS)
{
printf("magma_zswap_batched nb=%d, > %d, not supported\n", int(n), int(MAX_NTHREADS) );
return -15;
}
dim3 grid(1,1, batchCount);
dim3 threads(zamax, 1, 1);
zswap_kernel_batched<<< grid, threads, 0, queue >>>(n, x_array, incx, step, ipiv_array);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zscal_zgeru_kernel_batched(int m, int n, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A = &(A_start[step + step * lda]);
magmaDoubleComplex *shared_y = shared_data;
int tx = threadIdx.x;
int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x;
if (tx < n) {
shared_y[tx] = A[lda * tx];
}
__syncthreads();
if (shared_y[0] == MAGMA_Z_ZERO) {
info_array[blockIdx.z] = step + gbstep + 1;
return;
}
if (gbidx < m && gbidx > 0) {
magmaDoubleComplex reg = MAGMA_Z_ZERO;
reg = A[gbidx];
reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]);
A[gbidx] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
//A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg; //cuda give wrong results with this one
//A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one
A[gbidx + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_zscal_zgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized kernel which merges the two kernels zscal and zgeru:
    1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
    2) Perform a zgeru operation for the trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if ( n == 0) return 0;
if ( n > MAX_NTHREADS)
{
printf("magma_zscal_zgeru_batched nb=%d, > %d, not supported \n", int(n), int(MAX_NTHREADS) );
return -15;
}
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
size_t shared_size = sizeof(magmaDoubleComplex)*(n);
dim3 grid(nchunk, 1, batchCount);
dim3 threads(min(m, MAX_NTHREADS), 1, 1);
zscal_zgeru_kernel_batched<<< grid, threads, shared_size, queue>>>(m, n, step, dA_array, lda, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void zgetf2trsm_kernel_batched(int ib, int n, magmaDoubleComplex **dA_array, int step, int lda)
{
/*
this kernel does the safe nonblocked TRSM operation
B = A^-1 * B
*/
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A = &(A_start[step + step * lda]);
magmaDoubleComplex *B = &(A_start[step + (step+ib) * lda]);
magmaDoubleComplex *shared_a = shared_data;
magmaDoubleComplex *shared_b = shared_data+ib*ib;
int tid = threadIdx.x;
int i,d;
    // Read A and B together into shared memory (shared_a, shared_b);
    // note that shared_b = shared_a+ib*ib, so the storage is contiguous
    // and both can be loaded in a single loop
if ( tid < ib) {
#pragma unroll
for (i=0; i < n+ib; i++) {
shared_a[tid + i*ib] = A[tid + i*lda];
}
}
__syncthreads();
if (tid < n) {
#pragma unroll
for (d=0; d < ib-1; d++) {
for (i=d+1; i < ib; i++) {
shared_b[i+tid*ib] += (MAGMA_Z_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
}
}
}
__syncthreads();
// write back B
if ( tid < ib) {
#pragma unroll
for (i=0; i < n; i++) {
B[tid + i*lda] = shared_b[tid + i*ib];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
zgetf2trsm solves one of the matrix equations on gpu
B = C^-1 * B
where C, B are part of the matrix A in dA_array,
    This version loads C and B into shared memory, solves the system there,
    and copies the result back to GPU device memory.
    This is an internal routine that might have many assumptions.
Arguments
---------
@param[in]
ib INTEGER
The number of rows/columns of each matrix C, and rows of B. ib >= 0.
@param[in]
n INTEGER
The number of columns of each matrix B. n >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C" void
magma_zgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaDoubleComplex **dA_array,
magma_int_t step, magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
/*
*/
if ( n == 0 || ib == 0 ) return;
size_t shared_size = sizeof(magmaDoubleComplex)*(ib*(ib+n));
// TODO TODO TODO
if ( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra
{
printf("kernel_zgetf2trsm error out of shared memory \n");
return;
}
dim3 grid(1, 1, batchCount);
dim3 threads(max(n,ib), 1, 1);
zgetf2trsm_kernel_batched<<< grid, threads, shared_size, queue>>>(ib, n, dA_array, step, ldda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zupdate_device(int m, int step, magmaDoubleComplex* x, int ldx, magmaDoubleComplex *A, int lda)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
int indx;
//magmaDoubleComplex reg = MAGMA_Z_ZERO;
// update the current column by all the previous one
#pragma unroll
for (int i=0; i < step; i++) {
for (int s=0; s < nchunk; s++)
{
indx = tid + s * MAX_NTHREADS;
if ( indx > i && indx < m ) {
A[indx] -= A[i] * x[indx + i*ldx];
//printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i);
}
}
__syncthreads();
}
//printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zscal5_device(int m, magmaDoubleComplex* x, magmaDoubleComplex alpha)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m ) {
#if 0
x[tid + s * MAX_NTHREADS] *= MAGMA_Z_DIV(MAGMA_Z_ONE, alpha);
#else
x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha;
#endif
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
int gboff = paneloffset+step;
magma_int_t *ipiv = ipiv_array[blockIdx.z];
magmaDoubleComplex *A_start = dA_array[blockIdx.z];
magmaDoubleComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]);
magmaDoubleComplex *A00 = &(A_start[paneloffset + paneloffset * lda]);
magmaDoubleComplex *shared_A = shared_data;
__shared__ double shared_x[zamax];
__shared__ int shared_idx[zamax];
__shared__ magmaDoubleComplex alpha;
int tid = threadIdx.x;
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
// read the current column from dev to shared memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
}
__syncthreads();
// update this column
if ( step > 0 ) {
zupdate_device( m, step, A00, lda, shared_A, 1);
__syncthreads();
}
    // if ( tid < (m-step) ) // DO NOT PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE.
    // So let all threads call this routine; it handles the size correctly internally.
    // note that izamax needs only 128 threads
izamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
if (tid == 0) {
ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
alpha = shared_A[shared_idx[0]+step];
//printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha);
if (shared_x[0] == MAGMA_D_ZERO) {
info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
}
}
__syncthreads();
if (shared_x[0] == MAGMA_D_ZERO) return;
__syncthreads();
    // DO NOT PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE.
zscal5_device( m-step, shared_A+step, alpha);
    // put back the pivot, which has been scaled by itself and is therefore = 1
if (tid == 0) shared_A[shared_idx[0] + step] = alpha;
__syncthreads();
// write back from shared to dev memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m )
{
A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
//printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]);
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_zcomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized kernel which merges the two kernels zscal and zgeru:
    1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
    2) Perform a zgeru operation for the trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if ( m == 0) return 0;
size_t all_shmem_size = zamax*(sizeof(double)+sizeof(int)) + (m+2)*sizeof(magmaDoubleComplex);
if ( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra
{
printf("magma_zcomputecolumn_batched error out of shared memory \n");
return -20;
}
size_t shared_size = sizeof(magmaDoubleComplex)*m;
dim3 grid(1, 1, batchCount);
dim3 threads(min(m, MAX_NTHREADS), 1, 1);
zcomputecolumn_kernel_shared_batched<<< grid, threads, shared_size, queue>>>(m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kernel_zgetf2_sm_batched(
magma_int_t m, magma_int_t ib,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array)
{
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
magmaDoubleComplex *shared_A = shared_data;
magmaDoubleComplex *A = dA_array[blockIdx.z];
double *shared_x = (double*)(shared_A + m * ib);
int *shared_idx = (int*)(shared_x + zamax);
magmaDoubleComplex res;
int length;
__shared__ int jp;
// load data to shared memory
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
shared_A[tx + i * m] = A[tx + i * lda];
//printf("shared_A=%f ", shared_A[tx + i * m]);
}
}
__syncthreads();
for (int j=0; j < ib; j++)
{
length = m - j;
int offset = j + j*m;
//======================================
//find max
if (tx < zamax)
{
if ( tx < length)
{
res = shared_A[tx + offset];
// printf("res=%f\n", res);
shared_x[tx] = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res));
shared_idx[tx] = tx;
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = 0;
}
}
__syncthreads();
if (length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
if (tx == 0)
{
jp = shared_idx[0];
if (shared_A[jp + offset] == 0.0) printf("error, A(jp,j) == 0.0\n");
ipiv[j] = j + (jp + 1); // Fortran Indexing
//if (blockIdx.x == 1) printf("jp=%d ", jp + j + 1);
}
__syncthreads();
//======================================
if ( jp != 0) //swap
{
if (tx < ib) {
//printf("A[jp]= %f, A[j]=%f, jp=%d\n", shared_A[jp + j + tx*m], shared_A[j + tx*m], jp);
magmaDoubleComplex tmp = shared_A[jp + j + tx*m];
shared_A[jp + j + tx*m] = shared_A[j + tx*m];
shared_A[j + tx*m] = tmp;
}
}
__syncthreads();
//======================================
// Ger
if (tx < length && tx > 0)
{
res = shared_A[tx + offset];
res *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_A[0 + offset]); // scaling
shared_A[tx + offset] = res;
#pragma unroll 8
for (int i=1; i < ib-j; i++)
{
shared_A[tx + i*m + offset] += (MAGMA_Z_NEG_ONE) * shared_A[i*m + offset] * res;
//printf("res= %f, shared_A=%f\n", res, shared_A[i*m + offset]);
}
}
__syncthreads();
} // end of j
//======================================
// write back
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
A[tx + i * lda] = shared_A[tx + i * m];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZGETF2_SM computes an LU factorization of a general M-by-N matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
    This version loads the entire matrix (m*ib) into shared memory, factorizes it
    with pivoting, and copies the result back to GPU device memory.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix A. M >= 0.
@param[in]
ib INTEGER
The number of columns of each matrix A. ib >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
            or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_zgetf2_sm_batched(
magma_int_t m, magma_int_t ib,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magma_int_t **ipiv_array,
magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    loads the entire matrix (m*ib) into shared memory, factorizes it with pivoting, and copies it back.
*/
size_t shared_size = sizeof(magmaDoubleComplex) * m * ib + (zamax) * (sizeof(double) + sizeof(int)) + sizeof(int);
if (shared_size > 47000)
{
printf("Shared memory in zgetf2 = %d, exceeds 48K, kernel can not lauched succesfully\n", int(shared_size) );
return 1;
}
dim3 grid(1,1, batchCount);
dim3 threads(max(max(zamax, m), ib), 1, 1);
kernel_zgetf2_sm_batched<<<grid, threads, shared_size, queue>>>
( m, ib, dA_array, ldda, ipiv_array, info_array);
return 0;
}
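// A small host-side helper sketched here for illustration (it is not part of MAGMA):
// it mirrors the shared-memory budget checked above -- m*ib complex panel entries plus
// the pivot-search scratch -- and returns the largest panel height m that still fits.
// The zamax value of 128 and the 47000-byte threshold are taken from this file and are
// otherwise assumptions.
static inline int zgetf2_sm_max_rows(int ib, int zamax_threads = 128, size_t limit = 47000)
{
    size_t pivot_scratch = zamax_threads * (sizeof(double) + sizeof(int)) + sizeof(int);
    if (ib <= 0 || limit <= pivot_scratch)
        return 0;
    // largest m with sizeof(magmaDoubleComplex)*m*ib + pivot_scratch <= limit
    return int((limit - pivot_scratch) / (sizeof(magmaDoubleComplex) * size_t(ib)));
}
// Example: for ib = 8 this allows panels of roughly 355 rows.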
|
12e44900865ee66ac12b36bc804066148953bde4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <PointersManager.h>
#include <exceptions/cuda_exception.h>
#include <StringUtils.h>
#include <logger.h>
#include <memory/Workspace.h>
namespace nd4j {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const nd4j::LaunchContext* context, const std::string& funcName) {
_context = const_cast<nd4j::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
hipError_t cudaResult = hipMalloc(reinterpret_cast<void **>(&dst), numberOfBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(nd4j::memory::MemoryType::DEVICE, numberOfBytes);
}
if (_context != nullptr)
hipMemcpyAsync(dst, src, numberOfBytes, hipMemcpyHostToDevice, *_context->getCudaStream());
else
hipMemcpy(dst, src, numberOfBytes, hipMemcpyHostToDevice);
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
nd4j_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p :_pOnGlobMem)
hipFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void printDevContentOnDev_(const void* pDev, const Nd4jLong len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const Nd4jLong len, const int tid) {
hipLaunchKernelGGL(( printDevContentOnDev_<T>), dim3(512), dim3(512), 1024, *nd4j::LaunchContext ::defaultContext()->getCudaStream(), pDev, len, tid);
auto res = hipStreamSynchronize(*nd4j::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
throw std::runtime_error("PointersManager::printDevContentOnDevFromHost: hipStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<Nd4jLong>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const Nd4jLong len, const int tid);
//BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, Nd4jLong len, int tid), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const Nd4jLong len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
hipMemcpyAsync(pHost, pDev, sizeof(T) * len, hipMemcpyDeviceToHost, *_context->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("PointersManager::printCudaHost: hipStreamSynchronize failed!");
for(Nd4jLong i = 0; i < len; ++i)
printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<Nd4jLong>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const Nd4jLong len) const;
}
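// A minimal usage sketch, added for illustration (it is not part of libnd4j): copy a
// small host array to the device through PointersManager and wait for the asynchronous
// copy before the host buffer goes out of scope. LaunchContext::defaultContext() is used
// the same way as in printDevContentOnDevFromHost() above; the function name below is
// made up for the example.
static void examplePointersManagerUsage() {
    float host[4] = {1.f, 2.f, 3.f, 4.f};
    nd4j::PointersManager manager(nd4j::LaunchContext::defaultContext(), "examplePointersManagerUsage");
    void* devCopy = manager.replicatePointer(host, sizeof(host));  // async host-to-device copy
    manager.synchronize();      // make sure the copy has finished before devCopy is used
    // devCopy is released automatically when manager goes out of scope
    (void) devCopy;
}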
| 12e44900865ee66ac12b36bc804066148953bde4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <PointersManager.h>
#include <exceptions/cuda_exception.h>
#include <StringUtils.h>
#include <logger.h>
#include <memory/Workspace.h>
namespace nd4j {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const nd4j::LaunchContext* context, const std::string& funcName) {
_context = const_cast<nd4j::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
cudaError_t cudaResult = cudaMalloc(reinterpret_cast<void **>(&dst), numberOfBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(nd4j::memory::MemoryType::DEVICE, numberOfBytes);
}
if (_context != nullptr)
cudaMemcpyAsync(dst, src, numberOfBytes, cudaMemcpyHostToDevice, *_context->getCudaStream());
else
cudaMemcpy(dst, src, numberOfBytes, cudaMemcpyHostToDevice);
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
nd4j_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p :_pOnGlobMem)
cudaFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void printDevContentOnDev_(const void* pDev, const Nd4jLong len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const Nd4jLong len, const int tid) {
printDevContentOnDev_<T><<<512, 512, 1024, *nd4j::LaunchContext ::defaultContext()->getCudaStream()>>>(pDev, len, tid);
auto res = cudaStreamSynchronize(*nd4j::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
throw std::runtime_error("PointersManager::printDevContentOnDevFromHost: cudaStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<Nd4jLong>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const Nd4jLong len, const int tid);
//BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, Nd4jLong len, int tid), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const Nd4jLong len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
cudaMemcpyAsync(pHost, pDev, sizeof(T) * len, cudaMemcpyDeviceToHost, *_context->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("PointersManager::printCudaHost: cudaStreamSynchronize failed!");
for(Nd4jLong i = 0; i < len; ++i)
printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<Nd4jLong>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const Nd4jLong len) const;
}
|
e7284a0ae05775ff7580b8730cb1ebdcaeda7aa7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
template<size_t stack_size>
struct findall_record_fn
{
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
int* d_sizes;
bool bcompute_size_only{true};
char** d_buffers;
custring_view_array* d_rows;
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
if( !bcompute_size_only && (d_counts[idx] < 1) )
return;
char* buffer = nullptr;
custring_view_array drow = nullptr;
if( !bcompute_size_only )
{
buffer = (char*)d_buffers[idx];
drow = d_rows[idx];
}
int nbytes = 0, nchars = (int)dstr->chars_count();
int spos = 0, rows_idx = 0, find_count = 0;
while( spos <= nchars )
{
int epos = nchars;
if( prog->find(idx,dstr,spos,epos) <=0 )
break;
if( bcompute_size_only )
{
unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos));
unsigned int size = custring_view::alloc_size(bytes,(epos-spos));
nbytes += ALIGN_SIZE(size);
++find_count;
}
else
{
custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer);
drow[rows_idx++] = str;
buffer += ALIGN_SIZE(str->alloc_size());
}
spos = epos > spos ? epos : spos + 1;
}
if( bcompute_size_only )
{
d_sizes[idx] = nbytes;
d_counts[idx] = find_count;
}
}
};
// for each string, return substring(s) which match specified pattern
int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::findall_record: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> sizes(count,0);
int* d_sizes = sizes.data().get();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes});
hipDeviceSynchronize();
//
// create rows of buffers
thrust::host_vector<int> hcounts(counts); // copies counts from device
thrust::host_vector<custring_view_array> hrows(count,nullptr);
thrust::host_vector<char*> hbuffers(count,nullptr);
for( unsigned int idx=0; idx < count; ++idx )
{
int rcount = hcounts[idx];
NVStrings* row = new NVStrings(rcount);
results.push_back(row);
if( rcount==0 )
continue;
hrows[idx] = row->pImpl->getStringsPtr();
int size = sizes[idx];
char* d_buffer = device_alloc<char>(size,0);
row->pImpl->setMemoryBuffer(d_buffer,size);
hbuffers[idx] = d_buffer;
}
// copy substrings into buffers
rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device
custring_view_array* d_rows = rows.data().get();
rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device
char** d_buffers = buffers.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
//
printCudaError(hipDeviceSynchronize(),"nvs-findall_record");
dreprog::destroy(prog);
return (int)results.size();
}
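// The two-pass layout used by findall_record_fn above is easy to lose in the regex
// details, so here is a stripped-down sketch of the same idea (illustration only, not
// part of NVStrings): the one functor is run once with bcompute_size_only=true to
// measure per-row sizes, the host allocates one buffer per row from those sizes, and
// the same functor is run again with bcompute_size_only=false to fill the buffers.
struct two_pass_copy_fn
{
    const int* d_input;
    int* d_sizes;                    // pass 1 output: bytes required per row
    bool bcompute_size_only{true};
    int** d_buffers;                 // pass 2 input: per-row buffers sized from pass 1
    __device__ void operator()(unsigned int idx)
    {
        if( bcompute_size_only )
            d_sizes[idx] = sizeof(int);          // pass 1: only record the size
        else
            d_buffers[idx][0] = d_input[idx];    // pass 2: write into the preallocated buffer
    }
};
// Both passes are launched with thrust::for_each_n over a counting iterator, exactly
// as findall_record() does for its small/medium/large stack variants.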
| e7284a0ae05775ff7580b8730cb1ebdcaeda7aa7.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
template<size_t stack_size>
struct findall_record_fn
{
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
int* d_sizes;
bool bcompute_size_only{true};
char** d_buffers;
custring_view_array* d_rows;
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
if( !bcompute_size_only && (d_counts[idx] < 1) )
return;
char* buffer = nullptr;
custring_view_array drow = nullptr;
if( !bcompute_size_only )
{
buffer = (char*)d_buffers[idx];
drow = d_rows[idx];
}
int nbytes = 0, nchars = (int)dstr->chars_count();
int spos = 0, rows_idx = 0, find_count = 0;
while( spos <= nchars )
{
int epos = nchars;
if( prog->find(idx,dstr,spos,epos) <=0 )
break;
if( bcompute_size_only )
{
unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos));
unsigned int size = custring_view::alloc_size(bytes,(epos-spos));
nbytes += ALIGN_SIZE(size);
++find_count;
}
else
{
custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer);
drow[rows_idx++] = str;
buffer += ALIGN_SIZE(str->alloc_size());
}
spos = epos > spos ? epos : spos + 1;
}
if( bcompute_size_only )
{
d_sizes[idx] = nbytes;
d_counts[idx] = find_count;
}
}
};
// for each string, return substring(s) which match specified pattern
int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::findall_record: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> sizes(count,0);
int* d_sizes = sizes.data().get();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes});
cudaDeviceSynchronize();
//
// create rows of buffers
thrust::host_vector<int> hcounts(counts); // copies counts from device
thrust::host_vector<custring_view_array> hrows(count,nullptr);
thrust::host_vector<char*> hbuffers(count,nullptr);
for( unsigned int idx=0; idx < count; ++idx )
{
int rcount = hcounts[idx];
NVStrings* row = new NVStrings(rcount);
results.push_back(row);
if( rcount==0 )
continue;
hrows[idx] = row->pImpl->getStringsPtr();
int size = sizes[idx];
char* d_buffer = device_alloc<char>(size,0);
row->pImpl->setMemoryBuffer(d_buffer,size);
hbuffers[idx] = d_buffer;
}
// copy substrings into buffers
rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device
custring_view_array* d_rows = rows.data().get();
rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device
char** d_buffers = buffers.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows});
//
printCudaError(cudaDeviceSynchronize(),"nvs-findall_record");
dreprog::destroy(prog);
return (int)results.size();
}
|
922d47899850e2cf72a5ef67fc1d061ac1b52e32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// printf.cu
/*
* Simple script to show how to print on a GPU.
* NOTE: I have no idea why, but this simply does not work unless
* you initialize an array on the GPU, then free it afterwards.
* Hence the hipMalloc() and hipFree() calls that do nothing.
*
* My assumption is that it has something to do with initializing
* an "active" state on the device. hipDeviceSynchronize(),
* hipDeviceReset(), and hipSetDevice() do not suffice, however.
*/
#include <stdio.h>
__global__ void hello() {
printf("Hello from Block %d, Thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
float *d_arr;
hipMalloc(&d_arr, 25*sizeof(float));
hipLaunchKernelGGL(( hello), dim3(5),dim3(5), 0, 0, );
hipFree(d_arr);
return 0;
}
| 922d47899850e2cf72a5ef67fc1d061ac1b52e32.cu | // printf.cu
/*
* Simple script to show how to print on a GPU.
* NOTE: I have no idea why, but this simply does not work unless
* you initialize an array on the GPU, then free it afterwards.
* Hence the cudaMalloc() and cudaFree() calls that do nothing.
*
* My assumption is that it has something to do with initializing
* an "active" state on the device. cudaDeviceSynchronize(),
* cudaDeviceReset(), and cudaSetDevice() do not suffice, however.
*/
#include <stdio.h>
__global__ void hello() {
printf("Hello from Block %d, Thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
float *d_arr;
cudaMalloc(&d_arr, 25*sizeof(float));
hello<<<5,5>>>();
cudaFree(d_arr);
return 0;
}
|
8b88ff9f1a2220644fff6819c4a27df7842d9e57.hip | // !!! This is a file automatically generated by hipify!!!
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "helper.cuh"
#include "gradient.cuh"
#include "divergence.cuh"
#include "diffusion.cuh"
int main(int argc,char **argv)
{
// parse command line parameters
const char *params = {
"{i|image| |input image}"
"{w|bw|false|load input image as grayscale/black-white}"
"{n|iter|100|iterations}"
"{e|epsilon|0.01|epsilon}"
"{m|mode|0|diffusivity mode}"
"{d|dt|0.001|dt}"
};
cv::CommandLineParser cmd(argc, argv, params);
// input image
std::string inputImage = cmd.get<std::string>("image");
// load the input image as grayscale
bool gray = cmd.get<bool>("bw");
size_t iter = (size_t)cmd.get<int>("iter");
std::cout << "iterations: " << iter << std::endl;
float epsilon = cmd.get<float>("epsilon");
std::cout << "epsilon: " << epsilon << std::endl;
int mode = cmd.get<int>("mode");
std::cout << "mode: " << mode << std::endl;
float dt = cmd.get<float>("dt");
if (dt == 0.0f)
dt = 0.225f/funcDiffusivity(0, epsilon, mode);
std::cout << "dt: " << dt << std::endl;
// init camera
bool useCam = inputImage.empty();
cv::VideoCapture camera;
if (useCam && !openCamera(camera, 0))
{
std::cerr << "ERROR: Could not open camera" << std::endl;
return 1;
}
// read input frame
cv::Mat mIn;
if (useCam)
{
// read in first frame to get the dimensions
camera >> mIn;
}
else
{
// load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
mIn = cv::imread(inputImage.c_str(), (gray ? CV_LOAD_IMAGE_GRAYSCALE : -1));
}
// check
if (mIn.empty())
{
std::cerr << "ERROR: Could not retrieve frame " << inputImage << std::endl;
return 1;
}
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
int n = nc*h*w;
std::cout << "Image: " << w << " x " << h << std::endl;
// initialize CUDA context
hipDeviceSynchronize(); CUDA_CHECK;
// ### Set the output image format
cv::Mat mOut(h,w,mIn.type()); // grayscale or color depending on input image, nc layers
cv::Mat mDiv(h,w,mIn.type());
cv::Mat mV1(h,w,mIn.type());
cv::Mat mV2(h,w,mIn.type());
// ### Allocate arrays
// allocate raw input image array
float *imgIn = new float[n];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[n];
float *div = new float[n];
float *v1 = new float[n];
float *v2 = new float[n];
// allocate arrays on GPU
float *d_imgIn = NULL;
float *d_v1 = NULL;
float *d_v2 = NULL;
float *d_div = NULL;
// TODO alloc cuda memory for device arrays
hipMalloc(&d_imgIn, n* sizeof(float)); CUDA_CHECK;
hipMalloc(&d_v1, n* sizeof(float)); CUDA_CHECK;
hipMalloc(&d_v2, n* sizeof(float)); CUDA_CHECK;
hipMalloc(&d_div, n* sizeof(float)); CUDA_CHECK;
do
{
// convert range of each channel to [0,1]
mIn /= 255.0f;
// init raw input image array (and convert to layered)
convertMatToLayered (imgIn, mIn);
// upload to GPU
// TODO copy from imgIn to d_imgIn
hipMemcpy(d_imgIn, imgIn, n * sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
Timer timer;
timer.start();
for(size_t i = 0; i < iter; ++i)
{
// TODO (9.1) compute gradient of d_imgIn using computeGradientCuda() in gradient.cu
computeGradientCuda(d_v1, d_v2, d_imgIn, w, h, nc);
hipDeviceSynchronize();
// TODO (9.3) implement multDiffusivityCuda() in diffusion.cu
multDiffusivityCuda(d_v1, d_v2, w, h, nc, epsilon, mode);
hipDeviceSynchronize();
// TODO (9.4) compute divergence of d_v1, d_v2 using computeDivergenceCuda() in divergence.cu
computeDivergenceCuda(d_div, d_v1, d_v2, w, h, nc);
hipDeviceSynchronize();
// TODO (9.5) implement updateDiffusivityCuda() in diffusion.cu
updateDiffusivityCuda(d_imgIn, d_div, w, h, nc, dt);
hipDeviceSynchronize();
}
timer.end();
float t = timer.get();
std::cout << "time: " << t*1000 << " ms" << std::endl;
// download from GPU
// TODO download from device arrays to host arrays
hipMemcpy(v1, d_v1, n* sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(v2, d_v2, n* sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(div, d_div, n* sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(imgOut, d_imgIn, n* sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convertLayeredToMat(mV1, v1);
showImage("V1", mV1, 100+w+80, 100);
convertLayeredToMat(mV2, v2);
showImage("V2", mV2, 100+w+100, 100);
convertLayeredToMat(mDiv, div);
showImage("Div", mDiv, 100+w+60, 100);
convertLayeredToMat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
if (useCam)
{
// wait 30ms for key input
if (cv::waitKey(30) >= 0)
{
mIn.release();
}
else
{
// retrieve next frame from camera
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
}
}
}
while (useCam && !mIn.empty());
if (!useCam)
{
cv::waitKey(0);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
}
// ### Free allocated arrays
// TODO free cuda memory of all device arrays
hipFree(d_imgIn);
hipFree(d_v1);
hipFree(d_v2);
hipFree(d_div);
// TODO free memory of all host arrays
delete[] imgIn;
delete[] imgOut;
delete[] div;
delete[] v1;
delete[] v2;
// close all opencv windows
cv::destroyAllWindows();
return 0;
}
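// For reference, a minimal sketch of the kind of kernel updateDiffusivityCuda() could
// wrap (an assumption for illustration -- not the exercise's reference solution): one
// explicit Euler step u <- u + dt * div per pixel and channel, using the layered
// layout produced by convertMatToLayered() above.
__global__ void updateDiffusivitySketchKernel(float* imgIn, const float* div,
                                              int w, int h, int nc, float dt)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= w || y >= h)
        return;
    for (int c = 0; c < nc; ++c)
    {
        int idx = x + y*w + c*w*h;   // layered: one w*h plane per channel
        imgIn[idx] += dt * div[idx];
    }
}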
| 8b88ff9f1a2220644fff6819c4a27df7842d9e57.cu | // ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "helper.cuh"
#include "gradient.cuh"
#include "divergence.cuh"
#include "diffusion.cuh"
int main(int argc,char **argv)
{
// parse command line parameters
const char *params = {
"{i|image| |input image}"
"{w|bw|false|load input image as grayscale/black-white}"
"{n|iter|100|iterations}"
"{e|epsilon|0.01|epsilon}"
"{m|mode|0|diffusivity mode}"
"{d|dt|0.001|dt}"
};
cv::CommandLineParser cmd(argc, argv, params);
// input image
std::string inputImage = cmd.get<std::string>("image");
// load the input image as grayscale
bool gray = cmd.get<bool>("bw");
size_t iter = (size_t)cmd.get<int>("iter");
std::cout << "iterations: " << iter << std::endl;
float epsilon = cmd.get<float>("epsilon");
std::cout << "epsilon: " << epsilon << std::endl;
int mode = cmd.get<int>("mode");
std::cout << "mode: " << mode << std::endl;
float dt = cmd.get<float>("dt");
if (dt == 0.0f)
dt = 0.225f/funcDiffusivity(0, epsilon, mode);
std::cout << "dt: " << dt << std::endl;
// init camera
bool useCam = inputImage.empty();
cv::VideoCapture camera;
if (useCam && !openCamera(camera, 0))
{
std::cerr << "ERROR: Could not open camera" << std::endl;
return 1;
}
// read input frame
cv::Mat mIn;
if (useCam)
{
// read in first frame to get the dimensions
camera >> mIn;
}
else
{
// load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
mIn = cv::imread(inputImage.c_str(), (gray ? CV_LOAD_IMAGE_GRAYSCALE : -1));
}
// check
if (mIn.empty())
{
std::cerr << "ERROR: Could not retrieve frame " << inputImage << std::endl;
return 1;
}
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
int n = nc*h*w;
std::cout << "Image: " << w << " x " << h << std::endl;
// initialize CUDA context
cudaDeviceSynchronize(); CUDA_CHECK;
// ### Set the output image format
cv::Mat mOut(h,w,mIn.type()); // grayscale or color depending on input image, nc layers
cv::Mat mDiv(h,w,mIn.type());
cv::Mat mV1(h,w,mIn.type());
cv::Mat mV2(h,w,mIn.type());
// ### Allocate arrays
// allocate raw input image array
float *imgIn = new float[n];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[n];
float *div = new float[n];
float *v1 = new float[n];
float *v2 = new float[n];
// allocate arrays on GPU
float *d_imgIn = NULL;
float *d_v1 = NULL;
float *d_v2 = NULL;
float *d_div = NULL;
// TODO alloc cuda memory for device arrays
cudaMalloc(&d_imgIn, n* sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_v1, n* sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_v2, n* sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_div, n* sizeof(float)); CUDA_CHECK;
do
{
// convert range of each channel to [0,1]
mIn /= 255.0f;
// init raw input image array (and convert to layered)
convertMatToLayered (imgIn, mIn);
// upload to GPU
// TODO copy from imgIn to d_imgIn
cudaMemcpy(d_imgIn, imgIn, n * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
Timer timer;
timer.start();
for(size_t i = 0; i < iter; ++i)
{
// TODO (9.1) compute gradient of d_imgIn using computeGradientCuda() in gradient.cu
computeGradientCuda(d_v1, d_v2, d_imgIn, w, h, nc);
cudaDeviceSynchronize();
// TODO (9.3) implement multDiffusivityCuda() in diffusion.cu
multDiffusivityCuda(d_v1, d_v2, w, h, nc, epsilon, mode);
cudaDeviceSynchronize();
// TODO (9.4) compute divergence of d_v1, d_v2 using computeDivergenceCuda() in divergence.cu
computeDivergenceCuda(d_div, d_v1, d_v2, w, h, nc);
cudaDeviceSynchronize();
// TODO (9.5) implement updateDiffusivityCuda() in diffusion.cu
updateDiffusivityCuda(d_imgIn, d_div, w, h, nc, dt);
cudaDeviceSynchronize();
}
timer.end();
float t = timer.get();
std::cout << "time: " << t*1000 << " ms" << std::endl;
// download from GPU
// TODO download from device arrays to host arrays
cudaMemcpy(v1, d_v1, n* sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(v2, d_v2, n* sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(div, d_div, n* sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(imgOut, d_imgIn, n* sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convertLayeredToMat(mV1, v1);
showImage("V1", mV1, 100+w+80, 100);
convertLayeredToMat(mV2, v2);
showImage("V2", mV2, 100+w+100, 100);
convertLayeredToMat(mDiv, div);
showImage("Div", mDiv, 100+w+60, 100);
convertLayeredToMat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
if (useCam)
{
// wait 30ms for key input
if (cv::waitKey(30) >= 0)
{
mIn.release();
}
else
{
// retrieve next frame from camera
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
}
}
}
while (useCam && !mIn.empty());
if (!useCam)
{
cv::waitKey(0);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
}
// ### Free allocated arrays
// TODO free cuda memory of all device arrays
cudaFree(d_imgIn);
cudaFree(d_v1);
cudaFree(d_v2);
cudaFree(d_div);
// TODO free memory of all host arrays
delete[] imgIn;
delete[] imgOut;
delete[] div;
delete[] v1;
delete[] v2;
// close all opencv windows
cv::destroyAllWindows();
return 0;
}
|
89594314fb4dd69c55e864bc26f50e51a819be82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"
template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
}
return;
}
template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Select), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, cond, input_x, input_y, output);
return;
}
template void CalSelect<double>(const size_t size, const bool* cond, const double* input_X, const double* input_y,
double* output, hipStream_t cuda_stream);
template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
float* output, hipStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y, int* output,
hipStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
half* output, hipStream_t cuda_stream);
template void CalSelect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
int64_t* output, hipStream_t cuda_stream);
template void CalSelect<bool>(const size_t size, const bool *cond, const bool *input_X, const bool *input_y,
bool *output, hipStream_t cuda_stream);
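// A minimal host-side sketch of how CalSelect is meant to be driven (added for
// illustration; the wrapper name is made up and is not part of MindSpore): cond,
// input_x, input_y and output are device buffers of the same length, and
// output[i] becomes input_x[i] where cond[i] is true, otherwise input_y[i].
void CalSelectFloatExample(const size_t size, const bool* d_cond, const float* d_x,
                           const float* d_y, float* d_out, hipStream_t stream) {
  CalSelect<float>(size, d_cond, d_x, d_y, d_out, stream);
  hipStreamSynchronize(stream);  // only needed if the host reads d_out immediately
}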
| 89594314fb4dd69c55e864bc26f50e51a819be82.cu | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"
template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
}
return;
}
template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
cudaStream_t cuda_stream) {
Select<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, cond, input_x, input_y, output);
return;
}
template void CalSelect<double>(const size_t size, const bool* cond, const double* input_X, const double* input_y,
double* output, cudaStream_t cuda_stream);
template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
float* output, cudaStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y, int* output,
cudaStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
half* output, cudaStream_t cuda_stream);
template void CalSelect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
int64_t* output, cudaStream_t cuda_stream);
template void CalSelect<bool>(const size_t size, const bool *cond, const bool *input_X, const bool *input_y,
bool *output, cudaStream_t cuda_stream);
|
3d71e61e5d212e7b6a82b2569fc5541a40e8d565.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int idx = p.y * numCols + p.x;
if(p.x >= numCols || p.y >= numRows)
return;
greyImage[idx] = .299f *rgbaImage[idx].x + .587f * rgbaImage[idx].y + .114f * rgbaImage[idx].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(32, 32, 1); //TODO
const dim3 gridSize( numCols/32+1, numRows/32+1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
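// A host-side reference for spot-checking the GPU result (an illustration only, not
// part of the homework solution): the same NTSC weighting applied to one pixel. For
// pure white (255,255,255) it returns .299*255 + .587*255 + .114*255 = 255.
static unsigned char rgbaToGreyReference(uchar4 p)
{
  return (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
}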
| 3d71e61e5d212e7b6a82b2569fc5541a40e8d565.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int idx = p.y * numCols + p.x;
if(p.x >= numCols || p.y >= numRows)
return;
greyImage[idx] = .299f *rgbaImage[idx].x + .587f * rgbaImage[idx].y + .114f * rgbaImage[idx].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(32, 32, 1); //TODO
const dim3 gridSize( numCols/32+1, numRows/32+1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
a732aeccb97eb7ed7f9f5a5ba179aebdb1f4fd8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "distance/distance.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename DataType>
__global__ void naiveDistanceAdjKernel(bool *dist, const DataType *x, const DataType *y,
int m, int n, int k, DataType eps) {
int midx = threadIdx.x + blockIdx.x * blockDim.x;
int nidx = threadIdx.y + blockIdx.y * blockDim.y;
if (midx >= m || nidx >= n)
return;
DataType acc = DataType(0);
for (int i = 0; i < k; ++i) {
auto diff = x[i + midx * k] - y[i + nidx * k];
acc += diff * diff;
}
dist[midx * n + nidx] = acc <= eps;
}
template <typename DataType>
void naiveDistanceAdj(bool *dist, const DataType *x, const DataType *y, int m, int n,
int k, DataType eps) {
static const dim3 TPB(16, 32, 1);
dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1);
hipLaunchKernelGGL(( naiveDistanceAdjKernel<DataType>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k, eps);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename DataType>
struct DistanceAdjInputs {
DataType eps;
int m, n, k;
unsigned long long int seed;
};
template <typename DataType>
::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<DataType> &dims) {
return os;
}
template <typename DataType>
class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<DataType>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<DistanceAdjInputs<DataType>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(x, m * k);
allocate(y, n * k);
allocate(dist_ref, m * n);
allocate(dist, m * n);
r.uniform(x, m * k, DataType(-1.0), DataType(1.0), stream);
r.uniform(y, n * k, DataType(-1.0), DataType(1.0), stream);
DataType threshold = params.eps;
naiveDistanceAdj(dist_ref, x, y, m, n, k, threshold);
char *workspace = nullptr;
size_t worksize = getWorkspaceSize<EucExpandedL2, DataType, DataType, bool>(x, y, m, n, k);
if (worksize != 0) {
allocate(workspace, worksize);
}
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
auto fin_op = [threshold] __device__(DataType d_val, int g_d_idx) {
return d_val <= threshold;
};
distance<EucExpandedL2, DataType, DataType, bool, OutputTile_t>(
x, y, dist, m, n, k, workspace, worksize, fin_op, stream);
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(workspace));
}
void TearDown() override {
CUDA_CHECK(hipFree(x));
CUDA_CHECK(hipFree(y));
CUDA_CHECK(hipFree(dist_ref));
CUDA_CHECK(hipFree(dist));
}
protected:
DistanceAdjInputs<DataType> params;
DataType *x, *y;
bool *dist_ref, *dist;
};
const std::vector<DistanceAdjInputs<float>> inputsf = {
{0.01f, 1024, 1024, 32, 1234ULL},
{0.1f, 1024, 1024, 32, 1234ULL},
{1.0f, 1024, 1024, 32, 1234ULL},
{10.0f, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<float> DistanceAdjTestF;
TEST_P(DistanceAdjTestF, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF,
::testing::ValuesIn(inputsf));
const std::vector<DistanceAdjInputs<double>> inputsd = {
{0.01, 1024, 1024, 32, 1234ULL},
{0.1, 1024, 1024, 32, 1234ULL},
{1.0, 1024, 1024, 32, 1234ULL},
{10.0, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<double> DistanceAdjTestD;
TEST_P(DistanceAdjTestD, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD,
::testing::ValuesIn(inputsd));
} // end namespace DistanceAdj
} // end namespace MLCommon
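// Worked example of the adjacency rule exercised by naiveDistanceAdjKernel (numbers
// chosen only for illustration): with k = 2, x = (0.10, 0.20) and y = (0.15, 0.25),
// the squared L2 distance is 0.05^2 + 0.05^2 = 0.005, so the output is true for
// eps = 0.01 and false for eps = 0.001.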
| a732aeccb97eb7ed7f9f5a5ba179aebdb1f4fd8d.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "distance/distance.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename DataType>
__global__ void naiveDistanceAdjKernel(bool *dist, const DataType *x, const DataType *y,
int m, int n, int k, DataType eps) {
int midx = threadIdx.x + blockIdx.x * blockDim.x;
int nidx = threadIdx.y + blockIdx.y * blockDim.y;
if (midx >= m || nidx >= n)
return;
DataType acc = DataType(0);
for (int i = 0; i < k; ++i) {
auto diff = x[i + midx * k] - y[i + nidx * k];
acc += diff * diff;
}
dist[midx * n + nidx] = acc <= eps;
}
template <typename DataType>
void naiveDistanceAdj(bool *dist, const DataType *x, const DataType *y, int m, int n,
int k, DataType eps) {
static const dim3 TPB(16, 32, 1);
dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1);
naiveDistanceAdjKernel<DataType><<<nblks, TPB>>>(dist, x, y, m, n, k, eps);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename DataType>
struct DistanceAdjInputs {
DataType eps;
int m, n, k;
unsigned long long int seed;
};
template <typename DataType>
::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<DataType> &dims) {
return os;
}
template <typename DataType>
class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<DataType>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<DistanceAdjInputs<DataType>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(x, m * k);
allocate(y, n * k);
allocate(dist_ref, m * n);
allocate(dist, m * n);
r.uniform(x, m * k, DataType(-1.0), DataType(1.0), stream);
r.uniform(y, n * k, DataType(-1.0), DataType(1.0), stream);
DataType threshold = params.eps;
naiveDistanceAdj(dist_ref, x, y, m, n, k, threshold);
char *workspace = nullptr;
size_t worksize = getWorkspaceSize<EucExpandedL2, DataType, DataType, bool>(x, y, m, n, k);
if (worksize != 0) {
allocate(workspace, worksize);
}
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
auto fin_op = [threshold] __device__(DataType d_val, int g_d_idx) {
return d_val <= threshold;
};
distance<EucExpandedL2, DataType, DataType, bool, OutputTile_t>(
x, y, dist, m, n, k, workspace, worksize, fin_op, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(workspace));
}
void TearDown() override {
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
CUDA_CHECK(cudaFree(dist_ref));
CUDA_CHECK(cudaFree(dist));
}
protected:
DistanceAdjInputs<DataType> params;
DataType *x, *y;
bool *dist_ref, *dist;
};
const std::vector<DistanceAdjInputs<float>> inputsf = {
{0.01f, 1024, 1024, 32, 1234ULL},
{0.1f, 1024, 1024, 32, 1234ULL},
{1.0f, 1024, 1024, 32, 1234ULL},
{10.0f, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<float> DistanceAdjTestF;
TEST_P(DistanceAdjTestF, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF,
::testing::ValuesIn(inputsf));
const std::vector<DistanceAdjInputs<double>> inputsd = {
{0.01, 1024, 1024, 32, 1234ULL},
{0.1, 1024, 1024, 32, 1234ULL},
{1.0, 1024, 1024, 32, 1234ULL},
{10.0, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<double> DistanceAdjTestD;
TEST_P(DistanceAdjTestD, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD,
::testing::ValuesIn(inputsd));
} // end namespace DistanceAdj
} // end namespace MLCommon
|
f49e5c8f5fec0d8b59172b7bb735e3ee5bb8320d.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_cuda", [&]() {
BitwiseAndFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_cuda", [&]() {
BitwiseOrFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_cuda", [&]() {
BitwiseXorFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
} // namespace at::native
| f49e5c8f5fec0d8b59172b7bb735e3ee5bb8320d.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_cuda", [&]() {
BitwiseAndFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_cuda", [&]() {
BitwiseOrFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_cuda", [&]() {
BitwiseXorFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
} // namespace at::native
|
df74dc58afa194f4a0ddaa1b2f8bb2700c736a27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define TILE_WIDTH 32 // tile edge for the shared kernel: 32*32 = 1024 threads per block (the hardware limit) and 32*32*sizeof(double) = 8 KB of static shared memory
#define DEBUG 0
typedef double Type;
void transpose_CPU(Type* in_, Type* out, int dim1, int dim2){
for(int n = 0; n<dim1*dim2; n++) {
int i = n/dim1;
int j = n%dim1;
out[n] = in_[dim2*j + i];
}
}
//transpose per element
__global__ void transpose_GPU(Type* in_, Type* out, int dim1, int dim2){
int tile_size = blockDim.x ;
int column = tile_size * blockIdx.x + threadIdx.x;
int row = tile_size * blockIdx.y + threadIdx.y;
if(column < dim2 && row < dim1){
out[column*dim2 + row] = in_[column + row*dim2];
}
}
// SHARED MEMORY APPROACH - stage each tile in shared memory
__global__ void transpose_GPU_shared(Type* in_, Type* out, int dim1, int dim2){
// fill data into shared memory
__shared__ Type M_Shared[TILE_WIDTH][TILE_WIDTH];
int tile_size =TILE_WIDTH;
int column = tile_size * blockIdx.x + threadIdx.x;
int row = tile_size * blockIdx.y + threadIdx.y;
int index_in = row*dim2 + column;
int index_out = column*dim2 + row;
if(row < dim1 && column < dim2 && index_in < dim1*dim2){
M_Shared[threadIdx.y][threadIdx.x] = in_[index_in];
}
  __syncthreads(); // wait for every thread in the block to finish its load before proceeding
if(row < dim1 && column < dim2 && index_out < dim1*dim2){
out[index_out] = M_Shared[threadIdx.y][threadIdx.x];
}
}
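// Illustrative launch sketch (mirrors the configuration built in main() below):
// for a 1000 x 1000 matrix of doubles and TILE_WIDTH = 32,
//   dim3 blockSize(32, 32);   // 1024 threads, 32*32*8 B = 8 KB shared memory
//   dim3 gridSize(32, 32);    // 1000/32 + 1 = 32 blocks per dimension
//   hipLaunchKernelGGL((transpose_GPU_shared), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out, 1000, 1000);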
void print_matrix(Type * mat, int dim1, int dim2){
for (int i = 0; i < dim1; i++) {
for (int j = 0; j < dim2; j++) {
Type num = mat[i + dim2 * j];
printf(" %.2f ",num);
}
printf("\n");
}
}
void initial_matirx(Type * mat, int dim1, int dim2){
for (int i = 0; i < dim1*dim2; ++i) {
mat[i] = i;
}
}
int main(int argc , char* argv[]){
clock_t ser_msec;
int dim1 = atoi(argv[1]);
int dim2 = atoi(argv[2]);
printf("Matrix data type : double\n ");
printf("dimentions = (%d , %d) ,Tile width = %d",dim1,dim2,TILE_WIDTH);
int size = dim1*dim2*sizeof(Type);
Type *in_ = (Type*)malloc(size);
Type *out = (Type*)malloc(size);
Type *ref = (Type*)malloc(size);
// init matrix
initial_matirx(in_,dim1,dim2);
Type *d_in , *d_out;
hipMalloc(&d_in, size);
hipMalloc(&d_out, size);
hipMemcpyAsync(d_in, in_, size, hipMemcpyHostToDevice);
hipMalloc((Type**)&d_out,size);
hipMemset(d_out,0,size);
// init kernel
int threadNumX = TILE_WIDTH;
int threadNumY = TILE_WIDTH;
int blockNumX = dim1 / TILE_WIDTH + (dim1 % TILE_WIDTH == 0 ? 0 : 1 );
int blockNumY = dim2 / TILE_WIDTH + (dim2 % TILE_WIDTH == 0 ? 0 : 1 );
dim3 blockSize(threadNumX,threadNumY);
dim3 gridSize(blockNumX, blockNumY);
hipEvent_t start, stop,start_shared, stop_shared;
float elapsedTime, elapsedTime2;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&start_shared);
hipEventCreate(&stop_shared);
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********matrix**********\n");
print_matrix(in_, dim1, dim2);
}
ser_msec = clock();
transpose_CPU(in_, ref, dim1, dim2);
  ser_msec = (clock_t)((double)(clock() - ser_msec) * 1000 / CLOCKS_PER_SEC); // elapsed CPU time in milliseconds
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********serial**********\n");
print_matrix(ref, dim2, dim1);
}
hipEventRecord(start, 0);
hipLaunchKernelGGL(( transpose_GPU), dim3(gridSize),dim3(blockSize), 0, 0, d_in,d_out,dim1,dim2);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(out,d_out,size,hipMemcpyDeviceToHost);
hipEventRecord(start_shared, 0);
hipLaunchKernelGGL(( transpose_GPU_shared), dim3(gridSize),dim3(blockSize), 0, 0, d_in,d_out,dim1,dim2);
hipEventRecord(stop_shared, 0);
hipEventSynchronize(stop_shared);
hipEventElapsedTime(&elapsedTime2, start_shared, stop_shared);
hipMemcpy(out,d_out,size,hipMemcpyDeviceToHost);
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********parallel**********\n");
print_matrix(out, dim2, dim1);
}
printf ("\nTime for the serial: \t %ld ms", ser_msec);
printf ("\nTime for the NAIVE: \t %f ms and speedup: %f", elapsedTime, ser_msec/elapsedTime);
printf ("\nTime for the shared: \t %f ms and speedup: %f \n\n", elapsedTime2, ser_msec/elapsedTime2);
// cleanup
hipFree(d_in);
hipFree(d_out);
free(in_);
free(out);
free(ref);
return 0;
} | df74dc58afa194f4a0ddaa1b2f8bb2700c736a27.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define TILE_WIDTH 32 // tile edge for the shared kernel: 32*32 = 1024 threads per block (the hardware limit) and 32*32*sizeof(double) = 8 KB of static shared memory
#define DEBUG 0
typedef double Type;
void transpose_CPU(Type* in_, Type* out, int dim1, int dim2){
for(int n = 0; n<dim1*dim2; n++) {
int i = n/dim1;
int j = n%dim1;
out[n] = in_[dim2*j + i];
}
}
//transpose per element
__global__ void transpose_GPU(Type* in_, Type* out, int dim1, int dim2){
int tile_size = blockDim.x ;
int column = tile_size * blockIdx.x + threadIdx.x;
int row = tile_size * blockIdx.y + threadIdx.y;
if(column < dim2 && row < dim1){
out[column*dim2 + row] = in_[column + row*dim2];
}
}
// SHARED MEM APROACH - use shared memory
__global__ void transpose_GPU_shared(Type* in_, Type* out, int dim1, int dim2){
// fill data into shared memory
__shared__ Type M_Shared[TILE_WIDTH][TILE_WIDTH];
int tile_size =TILE_WIDTH;
int column = tile_size * blockIdx.x + threadIdx.x;
int row = tile_size * blockIdx.y + threadIdx.y;
int index_in = row*dim2 + column;
int index_out = column*dim2 + row;
if(row < dim1 && column < dim2 && index_in < dim1*dim2){
M_Shared[threadIdx.y][threadIdx.x] = in_[index_in];
}
  __syncthreads(); // wait for every thread in the block to finish its load before proceeding
if(row < dim1 && column < dim2 && index_out < dim1*dim2){
out[index_out] = M_Shared[threadIdx.y][threadIdx.x];
}
}
void print_matrix(Type * mat, int dim1, int dim2){
for (int i = 0; i < dim1; i++) {
for (int j = 0; j < dim2; j++) {
Type num = mat[i + dim2 * j];
printf(" %.2f ",num);
}
printf("\n");
}
}
void initial_matirx(Type * mat, int dim1, int dim2){
for (int i = 0; i < dim1*dim2; ++i) {
mat[i] = i;
}
}
int main(int argc , char* argv[]){
clock_t ser_msec;
int dim1 = atoi(argv[1]);
int dim2 = atoi(argv[2]);
printf("Matrix data type : double\n ");
printf("dimentions = (%d , %d) ,Tile width = %d",dim1,dim2,TILE_WIDTH);
int size = dim1*dim2*sizeof(Type);
Type *in_ = (Type*)malloc(size);
Type *out = (Type*)malloc(size);
Type *ref = (Type*)malloc(size);
// init matrix
initial_matirx(in_,dim1,dim2);
Type *d_in , *d_out;
cudaMalloc(&d_in, size);
cudaMalloc(&d_out, size);
cudaMemcpyAsync(d_in, in_, size, cudaMemcpyHostToDevice);
cudaMalloc((Type**)&d_out,size);
cudaMemset(d_out,0,size);
// init kernel
int threadNumX = TILE_WIDTH;
int threadNumY = TILE_WIDTH;
int blockNumX = dim1 / TILE_WIDTH + (dim1 % TILE_WIDTH == 0 ? 0 : 1 );
int blockNumY = dim2 / TILE_WIDTH + (dim2 % TILE_WIDTH == 0 ? 0 : 1 );
dim3 blockSize(threadNumX,threadNumY);
dim3 gridSize(blockNumX, blockNumY);
cudaEvent_t start, stop,start_shared, stop_shared;
float elapsedTime, elapsedTime2;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_shared);
cudaEventCreate(&stop_shared);
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********matrix**********\n");
print_matrix(in_, dim1, dim2);
}
ser_msec = clock();
transpose_CPU(in_, ref, dim1, dim2);
  ser_msec = (clock_t)((double)(clock() - ser_msec) * 1000 / CLOCKS_PER_SEC); // elapsed CPU time in milliseconds
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********serial**********\n");
print_matrix(ref, dim2, dim1);
}
cudaEventRecord(start, 0);
transpose_GPU<<<gridSize,blockSize>>>(d_in,d_out,dim1,dim2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(out,d_out,size,cudaMemcpyDeviceToHost);
cudaEventRecord(start_shared, 0);
transpose_GPU_shared<<<gridSize,blockSize>>>(d_in,d_out,dim1,dim2);
cudaEventRecord(stop_shared, 0);
cudaEventSynchronize(stop_shared);
cudaEventElapsedTime(&elapsedTime2, start_shared, stop_shared);
cudaMemcpy(out,d_out,size,cudaMemcpyDeviceToHost);
if(DEBUG && dim1<50 && dim2<50 ){
printf("\n*********parallel**********\n");
print_matrix(out, dim2, dim1);
}
printf ("\nTime for the serial: \t %ld ms", ser_msec);
printf ("\nTime for the NAIVE: \t %f ms and speedup: %f", elapsedTime, ser_msec/elapsedTime);
printf ("\nTime for the shared: \t %f ms and speedup: %f \n\n", elapsedTime2, ser_msec/elapsedTime2);
// cleanup
cudaFree(d_in);
cudaFree(d_out);
free(in_);
free(out);
free(ref);
return 0;
} |
60287eb8b7fda1a9f7d50b486abe53df868d7507.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory (LLNL).
// LLNL-CODE-742473. All rights reserved.
//
// This file is part of HiOp. For details, see https://github.com/LLNL/hiop. HiOp
// is released under the BSD 3-clause license (https://opensource.org/licenses/BSD-3-Clause).
// Please also read "Additional BSD Notice" below.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// i. Redistributions of source code must retain the above copyright notice, this list
// of conditions and the disclaimer below.
// ii. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the documentation and/or
// other materials provided with the distribution.
// iii. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to
// endorse or promote products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Additional BSD Notice
// 1. This notice is required to be provided under our contract with the U.S. Department
// of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under
// Contract No. DE-AC52-07NA27344 with the DOE.
// 2. Neither the United States Government nor Lawrence Livermore National Security, LLC
// nor any of their employees, makes any warranty, express or implied, or assumes any
// liability or responsibility for the accuracy, completeness, or usefulness of any
// information, apparatus, product, or process disclosed, or represents that its use would
// not infringe privately-owned rights.
// 3. Also, reference herein to any specific commercial products, process, or services by
// trade name, trademark, manufacturer or otherwise does not necessarily constitute or
// imply its endorsement, recommendation, or favoring by the United States Government or
// Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
// herein do not necessarily state or reflect those of the United States Government or
// Lawrence Livermore National Security, LLC, and shall not be used for advertising or
// product endorsement purposes.
/**
* @file VectorCudaKernels.cu
*
* @author Nai-Yuan Chiang <[email protected]>, LLNL
*
*/
#include "VectorCudaKernels.hpp"
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/transform_reduce.h>
#include <thrust/extrema.h>
#include <thrust/logical.h>
#include <thrust/execution_policy.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
//#include <cmath>
//#include <limits>
/// @brief compute abs(b-a)
template <typename T>
struct thrust_abs_diff: public thrust::binary_function<T,T,T>
{
__host__ __device__
T operator()(const T& a, const T& b)
{
return fabs(b - a);
}
};
/// @brief compute abs(a)
template <typename T>
struct thrust_abs: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return fabs(a);
}
};
/// @brief return true if abs(a) < tol_
struct thrust_abs_less
{
const double tol_;
thrust_abs_less(double tol) : tol_(tol) {}
__host__ __device__
int operator()(const double& a)
{
return (fabs(a) < tol_);
}
};
/// @brief return true if a < tol_
struct thrust_less
{
const double tol_;
thrust_less(double tol) : tol_(tol) {}
__host__ __device__
int operator()(const double& a)
{
return (a < tol_);
}
};
/// @brief compute the sign of a: (0.0 < a) - (a < 0.0)
template <typename T>
struct thrust_sig: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return static_cast<double>( (0.0 < a) - (a < 0.0) );
}
};
/// @brief compute sqrt(a)
template <typename T>
struct thrust_sqrt: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return sqrt(a);
}
};
/// @brief compute log(a) if a > 0, otherwise returns 0
template <typename T>
struct thrust_log_select: public thrust::unary_function<T,double>
{
__host__ __device__
double operator()(const T& a)
{
if(a>0){
return log(a);
}
return 0.;
}
};
/// @brief compute isinf(a)
template <typename T>
struct thrust_isinf: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isinf(a);
}
};
/// @brief compute isfinite(a)
template <typename T>
struct thrust_isfinite: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isfinite(a);
}
};
/// @brief compute a==0.0
template <typename T>
struct thrust_iszero: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return a== (T) (0.0);
}
};
/// @brief compute isnan(a)
template <typename T>
struct thrust_isnan: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isnan(a);
}
};
/// @brief compute (bool) (a)
struct thrust_istrue : public thrust::unary_function<int, bool>
{
__host__ __device__
bool operator()(const int& a)
{
return a!=0;
}
};
/** @brief Set y[i] = min(y[i],c), for i=[0,n_local-1] */
__global__ void component_min_cu(int n, double* y, const double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]<c) ? y[i] : c;
}
}
/** @brief Set y[i] = min(y[i],x[i]), for i=[0,n_local-1] */
__global__ void component_min_cu(int n, double* y, const double* x)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]<x[i]) ? y[i] : x[i];
}
}
/** @brief Set y[i] = max(y[i],c), for i=[0,n_local-1] */
__global__ void component_max_cu(int n, double* y, const double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]>c) ? y[i] : c;
}
}
/** @brief Set y[i] = max(y[i],x[i]), for i=[0,n_local-1] */
__global__ void component_max_cu(int n, double* y, const double* x)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]>x[i]) ? y[i] : x[i];
}
}
/// @brief Copy from src the elements specified by the indices in id.
__global__ void copy_from_index_cu(int n, double* vec, const double* val, const int* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
vec[i] = val[id[i]];
}
}
/// @brief Performs axpy, y += alpha*x, on the indexes in this specified by id.
__global__ void axpy_w_map_cu(int n, double* yd, const double* xd, const int* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
assert(id[i]<n);
yd[id[i]] = alpha * xd[i] + yd[id[i]];
}
}
/** @brief this[i] += alpha*x[i]*z[i] forall i */
__global__ void axzpy_cu(int n, double* yd, const double* xd, const double* zd, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = alpha * xd[i] * zd[i] + yd[i];
}
}
/** @brief this[i] += alpha*x[i]/z[i] forall i */
__global__ void axdzpy_cu(int n, double* yd, const double* xd, const double* zd, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = alpha * xd[i] / zd[i] + yd[i];
}
}
/** @brief this[i] += alpha*x[i]/z[i] forall i with pattern selection */
__global__ void axdzpy_w_pattern_cu(int n, double* yd, const double* xd, const double* zd, const double* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 1.0) {
yd[i] = alpha * xd[i] / zd[i] + yd[i];
}
}
}
/** @brief y[i] += alpha/x[i] forall i with pattern selection */
__global__ void adxpy_w_pattern_cu(int n, double* yd, const double* xd, const double* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==1.0) {
yd[i] = alpha / xd[i] + yd[i];
}
}
}
/** @brief elements of this that correspond to nonzeros in ix are divided by elements of v.
 The rest of the elements of this are set to zero.*/
__global__ void component_div_w_pattern_cu(int n, double* yd, const double* xd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==1.0) {
yd[i] = yd[i] / xd[i];
} else {
yd[i] = 0.0;
}
}
}
/** @brief y[i] += c forall i */
__global__ void add_constant_cu(int n, double* yd, double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = yd[i] + c;
}
}
/** @brief y[i] += c forall i with pattern selection */
__global__ void add_constant_w_pattern_cu(int n, double* yd, double c, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = yd[i] + c * id[i];
}
}
/// @brief Invert (1/x) the elements of this
__global__ void invert_cu(int n, double* yd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = 1. / yd[i];
}
}
/** @brief Linear damping term */
__global__ void set_linear_damping_term_cu(int n, double* yd, const double* vd, const double* ld, const double* rd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(ld[i]==1.0 && rd[i]==0.0) {
yd[i] = vd[i];
} else {
yd[i] = 0.0;
}
}
}
/**
 * @brief Performs `this[i] = alpha*this[i] + sign*ct` where sign=+1 when only ixleft[i] is 1.0,
 * sign=-1 when only ixright[i] is 1.0, and sign=0 when both or neither are 1.0.
*/
__global__ void add_linear_damping_term_cu(int n, double* data, const double* ixl, const double* ixr, double alpha, double ct)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = alpha * data[i] + ct*(ixl[i]-ixr[i]);
}
}
/** @brief y[i] = 1.0 if x[i] is positive and id[i] = 1.0, otherwise y[i] = 0 */
__global__ void is_posive_w_pattern_cu(int n, double* data, const double* vd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = (id[i] == 1.0 && vd[i] > 0.0) ? 1 : 0;
}
}
/** @brief y[i] = x[i] if id[i] = 1.0, otherwise y[i] = val_else */
__global__ void set_val_w_pattern_cu(int n, double* data, const double* vd, const double* id, double val_else)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = (id[i] == 1.0) ? vd[i] : val_else;
}
}
/** @brief data[i] = 0 if id[i]==0.0 */
__global__ void select_pattern_cu(int n, double* data, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 0.0) {
data[i] = 0.0;
}
}
}
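/** @brief data[i] = 0 if id[i]==0.0 (element-wise, same operation as select_pattern_cu) */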
__global__ void match_pattern_cu(int n, double* data, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 0.0) {
data[i] = 0.0;
}
}
}
/** @brief Project solution into bounds */
__global__ void project_into_bounds_cu(int n,
double* xd,
const double* xld,
const double* ild,
const double* xud,
const double* iud,
double kappa1,
double kappa2,
double small_real)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
double aux = 0.0;
double aux2 = 0.0;
if(ild[i] != 0.0 && iud[i] != 0.0) {
aux = kappa2*(xud[i] - xld[i]) - small_real;
aux2 = xld[i] + fmin(kappa1 * fmax(1.0, fabs(xld[i])), aux);
if(xd[i] < aux2) {
xd[i] = aux2;
} else {
aux2 = xud[i] - fmin(kappa1 * fmax(1.0, fabs(xud[i])), aux);
if(xd[i] > aux2) {
xd[i] = aux2;
}
}
#ifdef HIOP_DEEPCHECKS
assert(xd[i] > xld[i] && xd[i] < xud[i] && "this should not happen -> HiOp bug");
#endif
} else {
if(ild[i] != 0.0) {
xd[i] = fmax(xd[i], xld[i] + kappa1*fmax(1.0, fabs(xld[i])) - small_real);
}
if(iud[i] != 0.0) {
xd[i] = fmin(xd[i], xud[i] - kappa1*fmax(1.0, fabs(xud[i])) - small_real);
} else {
/*nothing for free vars */
}
}
}
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} */
__global__ void fraction_to_the_boundry_cu(int n, double* yd, const double* xd, const double* dd, double tau)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(dd[i]>=0) {
yd[i] = 1.0;
} else {
yd[i] = -tau*xd[i]/dd[i];
}
}
}
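// Worked example (illustrative values): with tau = 0.99,
//   x[i] = 2.0, d[i] = -1.0  ->  y[i] = -0.99*2.0/(-1.0) = 1.98
//   x[j] = 1.0, d[j] = -4.0  ->  y[j] = -0.99*1.0/(-4.0) = 0.2475
//   any entry with d >= 0    ->  y    = 1.0
// min_frac_to_bds_kernel() further below reduces these per-element step lengths with a minimum.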
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern select */
__global__ void fraction_to_the_boundry_w_pattern_cu(int n,
double* yd,
const double* xd,
const double* dd,
const double* id,
double tau)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(dd[i]>=0 || id[i]==0) {
yd[i] = 1.0;
} else {
yd[i] = -tau*xd[i]/dd[i];
}
}
}
/** @brief y[i] = 0 if id[i]==0.0 && xd[i]!=0.0, otherwise y[i] = 1*/
__global__ void set_match_pattern_cu(int n, int* yd, const double* xd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==0.0 && xd[i]!=0.0) {
yd[i] = 0;
} else {
yd[i] = 1;
}
}
}
/** @brief Adjusts duals. */
__global__ void adjust_duals_cu(int n, double* zd, const double* xd, const double* id, double mu, double kappa)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
double a,b;
for (int i = tid; i < n; i += num_threads) {
// preemptive loop to reduce number of iterations?
if(id[i] == 1.) {
// precompute a and b in another loop?
a = mu/xd[i];
b = a/kappa;
a = a*kappa;
// Necessary conditionals
if(zd[i]<b) {
zd[i] = b;
} else {
//zd[i]>=b
if(a<=b) {
zd[i] = b;
} else {
//a>b
if(a<zd[i]) {
zd[i] = a;
}
}
}
// - - - -
//else a>=z[i] then *z=*z (z[i] does not need adjustment)
}
}
}
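// Worked example (illustrative values): with mu = 0.1, kappa = 2.0 and xd[i] = 1.0,
// b = mu/(xd[i]*kappa) = 0.05 and a = mu*kappa/xd[i] = 0.2, so the branches above
// clamp zd[i] into [0.05, 0.2]; entries with id[i] != 1.0 are left untouched.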
/// set nonlinear type
__global__ void set_nonlinear_type_cu(const int n,
const int length,
hiop::hiopInterfaceBase::NonlinearityType* arr,
const int start,
const hiop::hiopInterfaceBase::NonlinearityType* arr_src,
const int start_src)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n && i < length; i += num_threads) {
arr[start+i] = arr_src[start_src+i];
}
}
/// set nonlinear type
__global__ void set_nonlinear_type_cu(const int n,
const int length,
hiop::hiopInterfaceBase::NonlinearityType* arr,
const int start,
const hiop::hiopInterfaceBase::NonlinearityType arr_src)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n && i < length; i += num_threads) {
arr[start+i] = arr_src;
}
}
/// for hiopVectorIntCuda
/**
 * @brief Set the vector entries to a linear space starting at i0 and containing evenly
 * incremented integers up to i0+(n-1)di, where n is the length of this vector
*/
__global__ void set_to_linspace_cu(int n, int *vec, int i0, int di)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
vec[i] = i0 + i*di;
}
}
/** @brief compute the cumulative sum (cumsum) from the given pattern */
__global__ void compute_cusum_cu(int n, int* vec, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(i==0) {
vec[i] = 0;
} else {
// from i=1..n
if(id[i-1]!=0.0){
vec[i] = 1;
} else {
vec[i] = 0;
}
}
}
}
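// Worked example (illustrative values): for n = 4 and pattern id = {1.0, 0.0, 1.0},
// this kernel writes vec = {0, 1, 0, 1}; the inclusive scan in compute_cusum_kernel()
// then turns it into {0, 1, 1, 2}, i.e. vec[i] = number of nonzeros in id[0..i-1].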
/// @brief Copy the entries in 'dd' where corresponding 'ix' is nonzero, to vd starting at start_index_in_dest.
__global__ void copyToStartingAt_w_pattern_cu(int n_src,
int n_dest,
int start_index_in_dest,
int* nnz_cumsum,
double *vd,
const double* dd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid+1; i < n_src+1; i += num_threads) {
if(nnz_cumsum[i] != nnz_cumsum[i-1]){
int idx_dest = nnz_cumsum[i-1] + start_index_in_dest;
vd[idx_dest] = dd[i-1];
}
}
}
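// Worked example (illustrative values): with dd = {a, b, c}, selection pattern {1, 0, 1}
// and hence nnz_cumsum = {0, 1, 1, 2} (see compute_cusum_cu above), and
// start_index_in_dest = 5, the kernel writes vd[5] = dd[0] and vd[6] = dd[2];
// dd[1] is skipped because its pattern entry is zero.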
namespace hiop
{
namespace cuda
{
constexpr int block_size=256;
/// @brief Copy from src the elements specified by the indices in id.
void copy_from_index_kernel(int n_local,
double* yd,
const double* src,
const int* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( copy_from_index_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, src, id);
}
/** @brief Set y[i] = min(y[i],c), for i=[0,n_local-1] */
void component_min_kernel(int n_local,
double* yd,
double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( component_min_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, c);
}
/** @brief Set y[i] = min(y[i],x[i], for i=[0,n_local-1] */
void component_min_kernel(int n_local,
double* yd,
const double* xd)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( component_min_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd);
}
/** @brief Set y[i] = max(y[i],c), for i=[0,n_local-1] */
void component_max_kernel(int n_local,
double* yd,
double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( component_max_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, c);
}
/** @brief Set y[i] = max(y[i],x[i]), for i=[0,n_local-1] */
void component_max_kernel(int n_local,
double* yd,
const double* xd)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( component_max_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd);
}
/// @brief Performs axpy, y += alpha*x, on the indexes in this specified by id.
void axpy_w_map_kernel(int n_local,
double* yd,
const double* xd,
const int* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( axpy_w_map_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id, alpha);
}
/** @brief y[i] += alpha*x[i]*z[i] forall i */
void axzpy_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( axzpy_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, zd, alpha);
}
/** @brief y[i] += alpha*x[i]/z[i] forall i */
void axdzpy_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( axdzpy_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, zd, alpha);
}
/** @brief y[i] += alpha*x[i]/z[i] forall i with pattern selection */
void axdzpy_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
const double* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( axdzpy_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, zd, id, alpha);
}
/** @brief y[i] += c forall i */
void add_constant_kernel(int n_local, double* yd, double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( add_constant_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, c);
}
/** @brief y[i] += c forall i with pattern selection */
void add_constant_w_pattern_kernel(int n_local, double* yd, const double* id, double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( add_constant_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, c, id);
}
/// @brief Invert (1/x) the elements of this
void invert_kernel(int n_local, double* yd)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( invert_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd);
}
/** @brief y[i] += alpha/x[i] forall i with pattern selection */
void adxpy_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( adxpy_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id, alpha);
}
/** @brief elements of this that correspond to nonzeros in ix are divided by elements of v.
    The rest of the elements of this are set to zero.*/
void component_div_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( component_div_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id);
}
/** @brief Linear damping term */
void set_linear_damping_term_kernel(int n_local,
double* yd,
const double* vd,
const double* ld,
const double* rd)
{
// compute linear damping term
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( set_linear_damping_term_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, vd, ld, rd);
}
/**
 * @brief Performs `this[i] = alpha*this[i] + sign*ct` where sign=+1 when only ixleft[i] is 1.0,
 * sign=-1 when only ixright[i] is 1.0, and sign=0 when both or neither are 1.0.
*/
void add_linear_damping_term_kernel(int n_local,
double* yd,
const double* ixl,
const double* ixr,
double alpha,
double ct)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( add_linear_damping_term_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, ixl, ixr, alpha, ct);
}
/** @brief Checks if selected elements of `this` are positive */
void is_posive_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( is_posive_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id);
}
/// set value with pattern
void set_val_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double max_val)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( set_val_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id, max_val);
}
/** @brief Project solution into bounds */
void project_into_bounds_kernel(int n_local,
double* xd,
const double* xld,
const double* ild,
const double* xud,
const double* iud,
double kappa1,
double kappa2,
double small_real)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( project_into_bounds_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, xd, xld, ild, xud, iud, kappa1, kappa2, small_real);
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} */
void fraction_to_the_boundry_kernel(int n_local,
double* yd,
const double* xd,
const double* dd,
double tau)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( fraction_to_the_boundry_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, dd, tau);
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern select */
void fraction_to_the_boundry_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* dd,
const double* id,
double tau)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( fraction_to_the_boundry_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, dd, id, tau);
}
/** @brief Set elements of `this` to zero based on `select`.*/
void select_pattern_kernel(int n_local, double* yd, const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( select_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, id);
}
/** @brief y[i] = 0 if id[i]==0.0 && xd[i]!=0.0, otherwise y[i] = 1*/
void component_match_pattern_kernel(int n_local, int* yd, const double* xd, const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( set_match_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id);
}
/** @brief Adjusts duals. */
void adjustDuals_plh_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double mu,
double kappa)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( adjust_duals_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, yd, xd, id, mu, kappa);
}
/// @brief set int array 'arr', starting at `start` and ending at `end`, to the values in `arr_src` from 'start_src`
void set_array_from_to_kernel(int n_local,
hiop::hiopInterfaceBase::NonlinearityType* arr,
int start,
int length,
const hiop::hiopInterfaceBase::NonlinearityType* arr_src,
int start_src)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( set_nonlinear_type_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, length, arr, start, arr_src, start_src);
}
/// @brief set int array 'arr', starting at `start` and ending at `end`, to the values in `arr_src` from 'start_src`
void set_array_from_to_kernel(int n_local,
hiop::hiopInterfaceBase::NonlinearityType* arr,
int start,
int length,
hiop::hiopInterfaceBase::NonlinearityType arr_src)
{
int num_blocks = (n_local+block_size-1)/block_size;
hipLaunchKernelGGL(( set_nonlinear_type_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_local, length, arr, start, arr_src);
}
/// @brief Set all elements to c.
void thrust_fill_kernel(int n, double* ptr, double c)
{
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::fill(thrust::device, dev_ptr, dev_ptr+n, c);
}
/** @brief inf norm on single rank */
double infnorm_local_kernel(int n, double* data_dev)
{
thrust_abs<double> abs_op;
thrust::maximum<double> max_op;
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(data_dev);
  // compute the infinity norm (max of absolute values)
double norm = thrust::transform_reduce(thrust::device, data_dev, data_dev+n, abs_op, 0.0, max_op);
return norm;
}
/** @brief Return the one norm */
double onenorm_local_kernel(int n, double* data_dev)
{
thrust_abs<double> abs_op;
thrust::plus<double> plus_op;
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(data_dev);
//thrust::device_ptr<double> dev_ptr(data_dev);
// compute one norm
double norm = thrust::transform_reduce(thrust::device, data_dev, data_dev+n, abs_op, 0.0, plus_op);
return norm;
}
/** @brief d1[i] = d1[i] * d2[i] forall i */
void thrust_component_mult_kernel(int n, double* d1, const double* d2)
{
// wrap raw pointer with a device_ptr
thrust::multiplies<double> mult_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> dev_v2 = thrust::device_pointer_cast(d2);
thrust::transform(thrust::device,
dev_v1, dev_v1+n,
dev_v2, dev_v1,
mult_op);
}
/** @brief d1[i] = d1[i] / d2[i] forall i */
void thrust_component_div_kernel(int n, double* d1, const double* d2)
{
// wrap raw pointer with a device_ptr
thrust::divides<double> div_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> dev_v2 = thrust::device_pointer_cast(d2);
thrust::transform(thrust::device,
dev_v1, dev_v1+n,
dev_v2, dev_v1,
div_op);
}
/** @brief d1[i] = abs(d1[i]) forall i */
void thrust_component_abs_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_abs<double> abs_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute abs
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, abs_op);
}
/** @brief d1[i] = sign(d1[i]) forall i */
void thrust_component_sgn_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_sig<double> sig_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sign
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, sig_op);
}
/** @brief d1[i] = sqrt(d1[i]) forall i */
void thrust_component_sqrt_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_sqrt<double> sqrt_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sqrt
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, sqrt_op);
}
/** @brief d1[i] = -(d1[i]) forall i */
void thrust_negate_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, thrust::negate<double>());
}
/** @brief compute sum(log(d1[i])) forall i where id[i]=1*/
double log_barr_obj_kernel(int n, double* d1, const double* id)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> id_v = thrust::device_pointer_cast(id);
// wrap raw pointer with a device_ptr
thrust_log_select<double> log_select_op;
thrust::plus<double> plus_op;
thrust::multiplies<double> mult_op;
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> v_temp = thrust::device_malloc(n*sizeof(double));
// compute x*id
thrust::transform(thrust::device, dev_v, dev_v+n, id_v, v_temp, mult_op);
// compute log(y) for y > 0
double sum = thrust::transform_reduce(thrust::device, v_temp, v_temp+n, log_select_op, 0.0, plus_op);
thrust::device_free(v_temp);
return sum;
}
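// Worked example (illustrative values): d1 = {2.0, 5.0, 3.0} with id = {1.0, 0.0, 1.0}
// gives d1*id = {2.0, 0.0, 3.0}, so the reduction returns log(2.0) + log(3.0);
// the masked-out middle entry contributes 0.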
/** @brief compute sum(d1[i]) */
double thrust_sum_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sum
return thrust::reduce(thrust::device, dev_v1, dev_v1+n, 0.0, thrust::plus<double>());
}
/** @brief Linear damping term */
double linear_damping_term_kernel(int n,
const double* vd,
const double* ld,
const double* rd,
double mu,
double kappa_d)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// compute linear damping term
hiop::cuda::set_linear_damping_term_kernel(n, dv_ptr, vd, ld, rd);
double term = thrust::reduce(thrust::device, v_temp.begin(), v_temp.end(), 0.0, thrust::plus<double>());
term *= mu;
term *= kappa_d;
return term;
}
/** @brief compute min(d1) */
double min_local_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<double> ret_dev_ptr = thrust::min_element(thrust::device, dev_v1, dev_v1+n);
double *ret_ptr = thrust::raw_pointer_cast(ret_dev_ptr);
double *ret_host = new double[1];
hipError_t cuerr = hipMemcpy(ret_host, ret_ptr, (1)*sizeof(double), hipMemcpyDeviceToHost);
double rv = ret_host[0];
delete [] ret_host;
return rv;
}
/** @brief Checks if selected elements of `this` are positive */
int all_positive_w_pattern_kernel(int n, const double* d1, const double* id)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
hiop::cuda::is_posive_w_pattern_kernel(n, dv_ptr, d1, id);
return thrust::reduce(thrust::device, v_temp.begin(), v_temp.end(), (int)0, thrust::plus<int>());
}
/** @brief compute min(d1) for selected elements*/
double min_w_pattern_kernel(int n, const double* d1, const double* id, double max_val)
{
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
double* d_ptr = thrust::raw_pointer_cast(dv_ptr);
// set value with pattern
hiop::cuda::set_val_w_pattern_kernel(n, d_ptr, d1, id, max_val);
thrust::device_ptr<double> ret_dev_ptr = thrust::min_element(thrust::device, dv_ptr, dv_ptr+n);
// TODO: how to return double from device to host?
double *ret_host = new double[1];
double *ret_ptr = thrust::raw_pointer_cast(ret_dev_ptr);
hipError_t cuerr = hipMemcpy(ret_host, ret_ptr, (1)*sizeof(double), hipMemcpyDeviceToHost);
double ret_v = ret_host[0];
delete [] ret_host;
thrust::device_free(dv_ptr);
return ret_v;
}
/** @brief check if xld[i] < xud[i] forall i */
bool check_bounds_kernel(int n, const double* xld, const double* xud)
{
  // Check that every upper bound is strictly larger than the corresponding lower bound
thrust::minus<double> minus_op;
thrust::device_ptr<double> dev_xud = thrust::device_pointer_cast(const_cast<double*>(xud));
thrust::device_ptr<double> dev_xld = thrust::device_pointer_cast(const_cast<double*>(xld));
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
thrust::transform(thrust::device,
dev_xud, dev_xud+n,
dev_xld, dv_ptr,
minus_op);
int res_offset = thrust::min_element(thrust::device, dv_ptr, dv_ptr + n) - dv_ptr;
double ret_v = *(dv_ptr + res_offset);
bool bval = (ret_v > 0.0) ? 1 : 0;
thrust::device_free(dv_ptr);
if(false == bval)
return false;
return true;
}
/** @brief compute max{a\in(0,1]| x+ad >=(1-tau)x} */
double min_frac_to_bds_kernel(int n, const double* xd, const double* dd, double tau)
{
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
double* d_ptr = thrust::raw_pointer_cast(dv_ptr);
// set values
hiop::cuda::fraction_to_the_boundry_kernel(n, d_ptr, xd, dd, tau);
int res_offset = thrust::min_element(thrust::device, dv_ptr, dv_ptr+n) - dv_ptr;
double alpha = *(dv_ptr + res_offset);
thrust::device_free(dv_ptr);
return alpha;
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern id */
double min_frac_to_bds_w_pattern_kernel(int n,
const double* xd,
const double* dd,
const double* id,
double tau)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// set value with pattern
hiop::cuda::fraction_to_the_boundry_w_pattern_kernel(n, dv_ptr, xd, dd, id, tau);
double alpha = *(thrust::min_element(thrust::device, v_temp.begin(), v_temp.end()));
return alpha;
}
/** @brief Checks if `xd` matches nonzero pattern of `id`. */
bool match_pattern_kernel(int n, const double* xd, const double* id)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<int> v_temp(n);
int* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// check if xd matches the pattern given by id
hiop::cuda::component_match_pattern_kernel(n, dv_ptr, xd, id);
thrust_istrue istrue_op;
return thrust::all_of(thrust::device, v_temp.begin(), v_temp.end(), istrue_op);
}
/** @brief Checks if all x[i] = 0 */
bool is_zero_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_iszero<double> iszero_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::all_of(thrust::device, dev_v, dev_v+n, iszero_op);
}
/** @brief Checks if any x[i] = nan */
bool isnan_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isnan<double> isnan_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::any_of(thrust::device, dev_v, dev_v+n, isnan_op);
}
/** @brief Checks if any x[i] = inf */
bool isinf_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isinf<double> isinf_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::any_of(thrust::device, dev_v, dev_v+n, isinf_op);
}
/** @brief Checks if all x[i] != inf */
bool isfinite_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isfinite<double> isfinite_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::all_of(thrust::device, dev_v, dev_v+n, isfinite_op);
}
/// @brief get number of values that are less than the given value 'val'.
int num_of_elem_less_than_kernel(int n, double* xd, double val)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
int rval = thrust::transform_reduce(thrust::device, dev_v, dev_v+n, thrust_less(val), (int) 0, thrust::plus<int>());
return rval;
}
/// @brief get number of values whose absolute value are less than the given value 'val'.
int num_of_elem_absless_than_kernel(int n, double* xd, double val)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
int rval = thrust::transform_reduce(thrust::device, dev_v, dev_v+n, thrust_abs_less(val), (int) 0, thrust::plus<int>());
return rval;
}
/// @brief Copy the entries in 'dd' where corresponding 'ix' is nonzero, to vd starting at start_index_in_dest.
void copyToStartingAt_w_pattern_kernel(int n_src,
int n_dest,
int start_index_in_dest,
int* nnz_cumsum,
double *vd,
const double* dd)
{
int num_blocks = (n_src+block_size-1)/block_size;
hipLaunchKernelGGL(( copyToStartingAt_w_pattern_cu), dim3(num_blocks),dim3(block_size), 0, 0, n_src,
n_dest,
start_index_in_dest,
nnz_cumsum,
vd,
dd);
}
/// for hiopVectorIntCuda
/**
 * @brief Set the vector entries to a linear space starting at i0 and containing evenly
 * incremented integers up to i0+(n-1)di, where n is the length of this vector
*/
void set_to_linspace_kernel(int sz, int* buf, int i0, int di)
{
int num_blocks = (sz+block_size-1)/block_size;
hipLaunchKernelGGL(( set_to_linspace_cu), dim3(num_blocks),dim3(block_size), 0, 0, sz, buf, i0, di);
}
/** @brief compute the cumulative sum (cumsum) from the given pattern */
void compute_cusum_kernel(int sz, int* buf, const double* id)
{
int num_blocks = (sz+block_size-1)/block_size;
hipLaunchKernelGGL(( compute_cusum_cu), dim3(num_blocks),dim3(block_size), 0, 0, sz, buf, id);
thrust::device_ptr<int> dev_v = thrust::device_pointer_cast(buf);
thrust::inclusive_scan(dev_v, dev_v + sz, dev_v); // in-place scan
}
}
}
| 60287eb8b7fda1a9f7d50b486abe53df868d7507.cu | // Copyright (c) 2017, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory (LLNL).
// LLNL-CODE-742473. All rights reserved.
//
// This file is part of HiOp. For details, see https://github.com/LLNL/hiop. HiOp
// is released under the BSD 3-clause license (https://opensource.org/licenses/BSD-3-Clause).
// Please also read "Additional BSD Notice" below.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// i. Redistributions of source code must retain the above copyright notice, this list
// of conditions and the disclaimer below.
// ii. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the documentation and/or
// other materials provided with the distribution.
// iii. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to
// endorse or promote products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Additional BSD Notice
// 1. This notice is required to be provided under our contract with the U.S. Department
// of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under
// Contract No. DE-AC52-07NA27344 with the DOE.
// 2. Neither the United States Government nor Lawrence Livermore National Security, LLC
// nor any of their employees, makes any warranty, express or implied, or assumes any
// liability or responsibility for the accuracy, completeness, or usefulness of any
// information, apparatus, product, or process disclosed, or represents that its use would
// not infringe privately-owned rights.
// 3. Also, reference herein to any specific commercial products, process, or services by
// trade name, trademark, manufacturer or otherwise does not necessarily constitute or
// imply its endorsement, recommendation, or favoring by the United States Government or
// Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
// herein do not necessarily state or reflect those of the United States Government or
// Lawrence Livermore National Security, LLC, and shall not be used for advertising or
// product endorsement purposes.
/**
* @file VectorCudaKernels.cu
*
* @author Nai-Yuan Chiang <[email protected]>, LLNL
*
*/
#include "VectorCudaKernels.hpp"
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/transform_reduce.h>
#include <thrust/extrema.h>
#include <thrust/logical.h>
#include <thrust/execution_policy.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
//#include <cmath>
//#include <limits>
/// @brief compute abs(b-a)
template <typename T>
struct thrust_abs_diff: public thrust::binary_function<T,T,T>
{
__host__ __device__
T operator()(const T& a, const T& b)
{
return fabs(b - a);
}
};
/// @brief compute abs(a)
template <typename T>
struct thrust_abs: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return fabs(a);
}
};
/// @brief return true if abs(a) < tol_
struct thrust_abs_less
{
const double tol_;
thrust_abs_less(double tol) : tol_(tol) {}
__host__ __device__
int operator()(const double& a)
{
return (fabs(a) < tol_);
}
};
/// @brief return true if a < tol_
struct thrust_less
{
const double tol_;
thrust_less(double tol) : tol_(tol) {}
__host__ __device__
int operator()(const double& a)
{
return (a < tol_);
}
};
/// @brief compute the sign of a: (0.0 < a) - (a < 0.0)
template <typename T>
struct thrust_sig: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return static_cast<double>( (0.0 < a) - (a < 0.0) );
}
};
/// @brief compute sqrt(a)
template <typename T>
struct thrust_sqrt: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()(const T& a)
{
return sqrt(a);
}
};
/// @brief compute log(a) if a > 0, otherwise returns 0
template <typename T>
struct thrust_log_select: public thrust::unary_function<T,double>
{
__host__ __device__
double operator()(const T& a)
{
if(a>0){
return log(a);
}
return 0.;
}
};
/// @brief compute isinf(a)
template <typename T>
struct thrust_isinf: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isinf(a);
}
};
/// @brief compute isfinite(a)
template <typename T>
struct thrust_isfinite: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isfinite(a);
}
};
/// @brief compute a==0.0
template <typename T>
struct thrust_iszero: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return a== (T) (0.0);
}
};
/// @brief compute isnan(a)
template <typename T>
struct thrust_isnan: public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(const T& a)
{
return isnan(a);
}
};
/// @brief compute (bool) (a)
struct thrust_istrue : public thrust::unary_function<int, bool>
{
__host__ __device__
bool operator()(const int& a)
{
return a!=0;
}
};
/** @brief Set y[i] = min(y[i],c), for i=[0,n_local-1] */
__global__ void component_min_cu(int n, double* y, const double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]<c) ? y[i] : c;
}
}
/** @brief Set y[i] = min(y[i],x[i]), for i=[0,n_local-1] */
__global__ void component_min_cu(int n, double* y, const double* x)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]<x[i]) ? y[i] : x[i];
}
}
/** @brief Set y[i] = max(y[i],c), for i=[0,n_local-1] */
__global__ void component_max_cu(int n, double* y, const double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]>c) ? y[i] : c;
}
}
/** @brief Set y[i] = max(y[i],x[i]), for i=[0,n_local-1] */
__global__ void component_max_cu(int n, double* y, const double* x)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
y[i] = (y[i]>x[i]) ? y[i] : x[i];
}
}
/// @brief Copy from src the elements specified by the indices in id.
__global__ void copy_from_index_cu(int n, double* vec, const double* val, const int* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
vec[i] = val[id[i]];
}
}
/// @brief Performs axpy, y += alpha*x, on the indexes in this specified by id.
__global__ void axpy_w_map_cu(int n, double* yd, const double* xd, const int* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
assert(id[i]<n);
yd[id[i]] = alpha * xd[i] + yd[id[i]];
}
}
/** @brief this[i] += alpha*x[i]*z[i] forall i */
__global__ void axzpy_cu(int n, double* yd, const double* xd, const double* zd, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = alpha * xd[i] * zd[i] + yd[i];
}
}
/** @brief this[i] += alpha*x[i]/z[i] forall i */
__global__ void axdzpy_cu(int n, double* yd, const double* xd, const double* zd, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = alpha * xd[i] / zd[i] + yd[i];
}
}
/** @brief this[i] += alpha*x[i]/z[i] forall i with pattern selection */
__global__ void axdzpy_w_pattern_cu(int n, double* yd, const double* xd, const double* zd, const double* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 1.0) {
yd[i] = alpha * xd[i] / zd[i] + yd[i];
}
}
}
/** @brief y[i] += alpha*1/x[i] + y[i] forall i with pattern selection */
__global__ void adxpy_w_pattern_cu(int n, double* yd, const double* xd, const double* id, double alpha)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==1.0) {
yd[i] = alpha / xd[i] + yd[i];
}
}
}
/** @brief elements of this that correspond to nonzeros in ix are divided by elements of v.
The rest of the elements of this are set to zero.*/
__global__ void component_div_w_pattern_cu(int n, double* yd, const double* xd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==1.0) {
yd[i] = yd[i] / xd[i];
} else {
yd[i] = 0.0;
}
}
}
/** @brief y[i] += c forall i */
__global__ void add_constant_cu(int n, double* yd, double c)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = yd[i] + c;
}
}
/** @brief y[i] += c forall i with pattern selection */
__global__ void add_constant_w_pattern_cu(int n, double* yd, double c, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = yd[i] + c * id[i];
}
}
/// @brief Invert (1/x) the elements of this
__global__ void invert_cu(int n, double* yd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
yd[i] = 1. / yd[i];
}
}
/** @brief Linear damping term */
__global__ void set_linear_damping_term_cu(int n, double* yd, const double* vd, const double* ld, const double* rd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(ld[i]==1.0 && rd[i]==0.0) {
yd[i] = vd[i];
} else {
yd[i] = 0.0;
}
}
}
/**
* @brief Performs `this[i] = alpha*this[i] + sign*ct` where sign = ixleft[i] - ixright[i],
* i.e. sign=1 when only ixleft[i] is 1.0, sign=-1 when only ixright[i] is 1.0, and sign=0 otherwise.
*/
__global__ void add_linear_damping_term_cu(int n, double* data, const double* ixl, const double* ixr, double alpha, double ct)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = alpha * data[i] + ct*(ixl[i]-ixr[i]);
}
}
/** @brief y[i] = 1.0 if x[i] is positive and id[i] = 1.0, otherwise y[i] = 0 */
__global__ void is_posive_w_pattern_cu(int n, double* data, const double* vd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = (id[i] == 1.0 && vd[i] > 0.0) ? 1 : 0;
}
}
/** @brief y[i] = x[i] if id[i] = 1.0, otherwise y[i] = val_else */
__global__ void set_val_w_pattern_cu(int n, double* data, const double* vd, const double* id, double val_else)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
data[i] = (id[i] == 1.0) ? vd[i] : val_else;
}
}
/** @brief data[i] = 0 if id[i]==0.0 */
__global__ void select_pattern_cu(int n, double* data, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 0.0) {
data[i] = 0.0;
}
}
}
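/** @brief data[i] = 0 if id[i]==0.0 (same behavior as select_pattern_cu) */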
__global__ void match_pattern_cu(int n, double* data, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i] == 0.0) {
data[i] = 0.0;
}
}
}
/** @brief Project solution into bounds */
__global__ void project_into_bounds_cu(int n,
double* xd,
const double* xld,
const double* ild,
const double* xud,
const double* iud,
double kappa1,
double kappa2,
double small_real)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
double aux = 0.0;
double aux2 = 0.0;
if(ild[i] != 0.0 && iud[i] != 0.0) {
aux = kappa2*(xud[i] - xld[i]) - small_real;
aux2 = xld[i] + fmin(kappa1 * fmax(1.0, fabs(xld[i])), aux);
if(xd[i] < aux2) {
xd[i] = aux2;
} else {
aux2 = xud[i] - fmin(kappa1 * fmax(1.0, fabs(xud[i])), aux);
if(xd[i] > aux2) {
xd[i] = aux2;
}
}
#ifdef HIOP_DEEPCHECKS
assert(xd[i] > xld[i] && xd[i] < xud[i] && "this should not happen -> HiOp bug");
#endif
} else {
if(ild[i] != 0.0) {
xd[i] = fmax(xd[i], xld[i] + kappa1*fmax(1.0, fabs(xld[i])) - small_real);
}
if(iud[i] != 0.0) {
xd[i] = fmin(xd[i], xud[i] - kappa1*fmax(1.0, fabs(xud[i])) - small_real);
} else {
/*nothing for free vars */
}
}
}
}
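// A small numeric sketch of the two-sided branch above (values chosen only for illustration):
// with xld[i]=0, xud[i]=1, kappa1=kappa2=1e-2 and small_real~0, the point is pushed into
// [xld[i]+min(kappa1*max(1,|xld[i]|), kappa2*(xud[i]-xld[i])),
//  xud[i]-min(kappa1*max(1,|xud[i]|), kappa2*(xud[i]-xld[i]))] = [0.01, 0.99],
// so any xd[i] outside this interval is moved to the nearest endpoint.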
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} */
__global__ void fraction_to_the_boundry_cu(int n, double* yd, const double* xd, const double* dd, double tau)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(dd[i]>=0) {
yd[i] = 1.0;
} else {
yd[i] = -tau*xd[i]/dd[i];
}
}
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern select */
__global__ void fraction_to_the_boundry_w_pattern_cu(int n,
double* yd,
const double* xd,
const double* dd,
const double* id,
double tau)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(dd[i]>=0 || id[i]==0) {
yd[i] = 1.0;
} else {
yd[i] = -tau*xd[i]/dd[i];
}
}
}
/** @brief y[i] = 0 if id[i]==0.0 && xd[i]!=0.0, otherwise y[i] = 1*/
__global__ void set_match_pattern_cu(int n, int* yd, const double* xd, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(id[i]==0.0 && xd[i]!=0.0) {
yd[i] = 0;
} else {
yd[i] = 1;
}
}
}
/** @brief Adjusts duals. */
__global__ void adjust_duals_cu(int n, double* zd, const double* xd, const double* id, double mu, double kappa)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
double a,b;
for (int i = tid; i < n; i += num_threads) {
// preemptive loop to reduce number of iterations?
if(id[i] == 1.) {
// precompute a and b in another loop?
a = mu/xd[i];
b = a/kappa;
a = a*kappa;
// Necessary conditionals
if(zd[i]<b) {
zd[i] = b;
} else {
//zd[i]>=b
if(a<=b) {
zd[i] = b;
} else {
//a>b
if(a<zd[i]) {
zd[i] = a;
}
}
}
// - - - -
//else a>=z[i] then *z=*z (z[i] does not need adjustment)
}
}
}
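// Equivalent closed form of the branching above (assuming kappa >= 1 so that a >= b): for the
// selected entries the dual is clamped to the interval [mu/(kappa*xd[i]), kappa*mu/xd[i]], i.e.
// zd[i] = min(max(zd[i], mu/(kappa*xd[i])), kappa*mu/xd[i]).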
/// set nonlinear type
__global__ void set_nonlinear_type_cu(const int n,
const int length,
hiop::hiopInterfaceBase::NonlinearityType* arr,
const int start,
const hiop::hiopInterfaceBase::NonlinearityType* arr_src,
const int start_src)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n && i < length; i += num_threads) {
arr[start+i] = arr_src[start_src+i];
}
}
/// set nonlinear type
__global__ void set_nonlinear_type_cu(const int n,
const int length,
hiop::hiopInterfaceBase::NonlinearityType* arr,
const int start,
const hiop::hiopInterfaceBase::NonlinearityType arr_src)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n && i < length; i += num_threads) {
arr[start+i] = arr_src;
}
}
/// for hiopVectorIntCuda
/**
* @brief Set the vector entries to a linear space starting at i0, containing evenly
* incremented integers up to i0+(n-1)*di, where n is the length of this vector
*/
__global__ void set_to_linspace_cu(int n, int *vec, int i0, int di)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
vec[i] = i0 + i*di;
}
}
/** @brief build the shifted 0/1 indicator of the given pattern; its inclusive scan gives the cumulative sum (cumsum) */
__global__ void compute_cusum_cu(int n, int* vec, const double* id)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += num_threads) {
if(i==0) {
vec[i] = 0;
} else {
// from i=1..n
if(id[i-1]!=0.0){
vec[i] = 1;
} else {
vec[i] = 0;
}
}
}
}
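// Worked example (arbitrary values): for id = {0, 1, 1, 0, 1} the kernel writes the shifted
// indicator vec = {0, 0, 1, 1, 0}; the inclusive scan applied later in compute_cusum_kernel
// turns this into {0, 0, 1, 2, 2}, i.e. vec[i] counts the nonzeros in id[0..i-1].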
/// @brief Copy the entries in 'dd' where corresponding 'ix' is nonzero, to vd starting at start_index_in_dest.
__global__ void copyToStartingAt_w_pattern_cu(int n_src,
int n_dest,
int start_index_in_dest,
int* nnz_cumsum,
double *vd,
const double* dd)
{
const int num_threads = blockDim.x * gridDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid+1; i < n_src+1; i += num_threads) {
if(nnz_cumsum[i] != nnz_cumsum[i-1]){
int idx_dest = nnz_cumsum[i-1] + start_index_in_dest;
vd[idx_dest] = dd[i-1];
}
}
}
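// Small sketch (arbitrary values): with n_src = 4, nnz_cumsum = {0, 0, 1, 2, 2} and
// start_index_in_dest = 3, the cumsum increases at i = 2 and i = 3, so dd[1] is written to
// vd[3] and dd[2] to vd[4]; dd[0] and dd[3] are skipped because their pattern entries are zero.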
namespace hiop
{
namespace cuda
{
constexpr int block_size=256;
/// @brief Copy from src the elements specified by the indices in id.
void copy_from_index_kernel(int n_local,
double* yd,
const double* src,
const int* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
copy_from_index_cu<<<num_blocks,block_size>>>(n_local, yd, src, id);
}
/** @brief Set y[i] = min(y[i],c), for i=[0,n_local-1] */
void component_min_kernel(int n_local,
double* yd,
double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
component_min_cu<<<num_blocks,block_size>>>(n_local, yd, c);
}
/** @brief Set y[i] = min(y[i],x[i]), for i=[0,n_local-1] */
void component_min_kernel(int n_local,
double* yd,
const double* xd)
{
int num_blocks = (n_local+block_size-1)/block_size;
component_min_cu<<<num_blocks,block_size>>>(n_local, yd, xd);
}
/** @brief Set y[i] = max(y[i],c), for i=[0,n_local-1] */
void component_max_kernel(int n_local,
double* yd,
double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
component_max_cu<<<num_blocks,block_size>>>(n_local, yd, c);
}
/** @brief Set y[i] = max(y[i],x[i]), for i=[0,n_local-1] */
void component_max_kernel(int n_local,
double* yd,
const double* xd)
{
int num_blocks = (n_local+block_size-1)/block_size;
component_max_cu<<<num_blocks,block_size>>>(n_local, yd, xd);
}
/// @brief Performs axpy, y += alpha*x, on the indexes in this specified by id.
void axpy_w_map_kernel(int n_local,
double* yd,
const double* xd,
const int* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
axpy_w_map_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id, alpha);
}
/** @brief y[i] += alpha*x[i]*z[i] forall i */
void axzpy_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
axzpy_cu<<<num_blocks,block_size>>>(n_local, yd, xd, zd, alpha);
}
/** @brief y[i] += alpha*x[i]/z[i] forall i */
void axdzpy_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
axdzpy_cu<<<num_blocks,block_size>>>(n_local, yd, xd, zd, alpha);
}
/** @brief y[i] += alpha*x[i]/z[i] forall i with pattern selection */
void axdzpy_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* zd,
const double* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
axdzpy_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, zd, id, alpha);
}
/** @brief y[i] += c forall i */
void add_constant_kernel(int n_local, double* yd, double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
add_constant_cu<<<num_blocks,block_size>>>(n_local, yd, c);
}
/** @brief y[i] += c forall i with pattern selection */
void add_constant_w_pattern_kernel(int n_local, double* yd, const double* id, double c)
{
int num_blocks = (n_local+block_size-1)/block_size;
add_constant_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, c, id);
}
/// @brief Invert (1/x) the elements of this
void invert_kernel(int n_local, double* yd)
{
int num_blocks = (n_local+block_size-1)/block_size;
invert_cu<<<num_blocks,block_size>>>(n_local, yd);
}
/** @brief y[i] += alpha*1/x[i] + y[i] forall i with pattern selection */
void adxpy_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double alpha)
{
int num_blocks = (n_local+block_size-1)/block_size;
adxpy_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id, alpha);
}
/** @brief elements of this that correspond to nonzeros in ix are divided by elements of v.
The rest of the elements of this are set to zero.*/
void component_div_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
component_div_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id);
}
/** @brief Linear damping term */
void set_linear_damping_term_kernel(int n_local,
double* yd,
const double* vd,
const double* ld,
const double* rd)
{
// compute linear damping term
int num_blocks = (n_local+block_size-1)/block_size;
set_linear_damping_term_cu<<<num_blocks,block_size>>>(n_local, yd, vd, ld, rd);
}
/**
* @brief Performs `this[i] = alpha*this[i] + sign*ct` where sign = ixleft[i] - ixright[i],
* i.e. sign=1 when only ixleft[i] is 1.0, sign=-1 when only ixright[i] is 1.0, and sign=0 otherwise.
*/
void add_linear_damping_term_kernel(int n_local,
double* yd,
const double* ixl,
const double* ixr,
double alpha,
double ct)
{
int num_blocks = (n_local+block_size-1)/block_size;
add_linear_damping_term_cu<<<num_blocks,block_size>>>(n_local, yd, ixl, ixr, alpha, ct);
}
/** @brief Checks if selected elements of `this` are positive */
void is_posive_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
is_posive_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id);
}
/// set value with pattern
void set_val_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double max_val)
{
int num_blocks = (n_local+block_size-1)/block_size;
set_val_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id, max_val);
}
/** @brief Project solution into bounds */
void project_into_bounds_kernel(int n_local,
double* xd,
const double* xld,
const double* ild,
const double* xud,
const double* iud,
double kappa1,
double kappa2,
double small_real)
{
int num_blocks = (n_local+block_size-1)/block_size;
project_into_bounds_cu<<<num_blocks,block_size>>>(n_local, xd, xld, ild, xud, iud, kappa1, kappa2, small_real);
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} */
void fraction_to_the_boundry_kernel(int n_local,
double* yd,
const double* xd,
const double* dd,
double tau)
{
int num_blocks = (n_local+block_size-1)/block_size;
fraction_to_the_boundry_cu<<<num_blocks,block_size>>>(n_local, yd, xd, dd, tau);
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern select */
void fraction_to_the_boundry_w_pattern_kernel(int n_local,
double* yd,
const double* xd,
const double* dd,
const double* id,
double tau)
{
int num_blocks = (n_local+block_size-1)/block_size;
fraction_to_the_boundry_w_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, dd, id, tau);
}
/** @brief Set elements of `this` to zero based on `select`.*/
void select_pattern_kernel(int n_local, double* yd, const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
select_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, id);
}
/** @brief y[i] = 0 if id[i]==0.0 && xd[i]!=0.0, otherwise y[i] = 1*/
void component_match_pattern_kernel(int n_local, int* yd, const double* xd, const double* id)
{
int num_blocks = (n_local+block_size-1)/block_size;
set_match_pattern_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id);
}
/** @brief Adjusts duals. */
void adjustDuals_plh_kernel(int n_local,
double* yd,
const double* xd,
const double* id,
double mu,
double kappa)
{
int num_blocks = (n_local+block_size-1)/block_size;
adjust_duals_cu<<<num_blocks,block_size>>>(n_local, yd, xd, id, mu, kappa);
}
/// @brief set `length` entries of int array `arr`, starting at `start`, to the values in `arr_src` starting at `start_src`
void set_array_from_to_kernel(int n_local,
hiop::hiopInterfaceBase::NonlinearityType* arr,
int start,
int length,
const hiop::hiopInterfaceBase::NonlinearityType* arr_src,
int start_src)
{
int num_blocks = (n_local+block_size-1)/block_size;
set_nonlinear_type_cu<<<num_blocks,block_size>>> (n_local, length, arr, start, arr_src, start_src);
}
/// @brief set `length` entries of int array `arr`, starting at `start`, to the value `arr_src`
void set_array_from_to_kernel(int n_local,
hiop::hiopInterfaceBase::NonlinearityType* arr,
int start,
int length,
hiop::hiopInterfaceBase::NonlinearityType arr_src)
{
int num_blocks = (n_local+block_size-1)/block_size;
set_nonlinear_type_cu<<<num_blocks,block_size>>> (n_local, length, arr, start, arr_src);
}
/// @brief Set all elements to c.
void thrust_fill_kernel(int n, double* ptr, double c)
{
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::fill(thrust::device, dev_ptr, dev_ptr+n, c);
}
/** @brief inf norm on single rank */
double infnorm_local_kernel(int n, double* data_dev)
{
thrust_abs<double> abs_op;
thrust::maximum<double> max_op;
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(data_dev);
// compute the infinity norm
double norm = thrust::transform_reduce(thrust::device, data_dev, data_dev+n, abs_op, 0.0, max_op);
return norm;
}
/** @brief Return the one norm */
double onenorm_local_kernel(int n, double* data_dev)
{
thrust_abs<double> abs_op;
thrust::plus<double> plus_op;
thrust::device_ptr<double> dev_ptr = thrust::device_pointer_cast(data_dev);
//thrust::device_ptr<double> dev_ptr(data_dev);
// compute one norm
double norm = thrust::transform_reduce(thrust::device, data_dev, data_dev+n, abs_op, 0.0, plus_op);
return norm;
}
/** @brief d1[i] = d1[i] * d2[i] forall i */
void thrust_component_mult_kernel(int n, double* d1, const double* d2)
{
// wrap raw pointer with a device_ptr
thrust::multiplies<double> mult_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> dev_v2 = thrust::device_pointer_cast(d2);
thrust::transform(thrust::device,
dev_v1, dev_v1+n,
dev_v2, dev_v1,
mult_op);
}
/** @brief d1[i] = d1[i] / d2[i] forall i */
void thrust_component_div_kernel(int n, double* d1, const double* d2)
{
// wrap raw pointer with a device_ptr
thrust::divides<double> div_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> dev_v2 = thrust::device_pointer_cast(d2);
thrust::transform(thrust::device,
dev_v1, dev_v1+n,
dev_v2, dev_v1,
div_op);
}
/** @brief d1[i] = abs(d1[i]) forall i */
void thrust_component_abs_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_abs<double> abs_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute abs
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, abs_op);
}
/** @brief d1[i] = sign(d1[i]) forall i */
void thrust_component_sgn_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_sig<double> sig_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sign
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, sig_op);
}
/** @brief d1[i] = sqrt(d1[i]) forall i */
void thrust_component_sqrt_kernel(int n, double* d1)
{
// wrap raw pointer with a device_ptr
thrust_sqrt<double> sqrt_op;
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sqrt
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, sqrt_op);
}
/** @brief d1[i] = -(d1[i]) forall i */
void thrust_negate_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::transform(thrust::device, dev_v1, dev_v1+n, dev_v1, thrust::negate<double>());
}
/** @brief compute sum(log(d1[i])) forall i where id[i]=1*/
double log_barr_obj_kernel(int n, double* d1, const double* id)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(d1);
thrust::device_ptr<const double> id_v = thrust::device_pointer_cast(id);
// wrap raw pointer with a device_ptr
thrust_log_select<double> log_select_op;
thrust::plus<double> plus_op;
thrust::multiplies<double> mult_op;
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> v_temp = thrust::device_malloc(n*sizeof(double));
// compute x*id
thrust::transform(thrust::device, dev_v, dev_v+n, id_v, v_temp, mult_op);
// compute log(y) for y > 0
double sum = thrust::transform_reduce(thrust::device, v_temp, v_temp+n, log_select_op, 0.0, plus_op);
thrust::device_free(v_temp);
return sum;
}
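// In formula form, the reduction above returns sum_i log(d1[i]*id[i]) restricted to entries
// where the product is positive, i.e. the log-barrier term sum of log(d1[i]) over the selected
// (id[i]==1) entries with d1[i] > 0.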
/** @brief compute sum(d1[i]) */
double thrust_sum_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
// compute sum
return thrust::reduce(thrust::device, dev_v1, dev_v1+n, 0.0, thrust::plus<double>());
}
/** @brief Linear damping term */
double linear_damping_term_kernel(int n,
const double* vd,
const double* ld,
const double* rd,
double mu,
double kappa_d)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// compute linear damping term
hiop::cuda::set_linear_damping_term_kernel(n, dv_ptr, vd, ld, rd);
double term = thrust::reduce(thrust::device, v_temp.begin(), v_temp.end(), 0.0, thrust::plus<double>());
term *= mu;
term *= kappa_d;
return term;
}
/** @brief compute min(d1) */
double min_local_kernel(int n, double* d1)
{
thrust::device_ptr<double> dev_v1 = thrust::device_pointer_cast(d1);
thrust::device_ptr<double> ret_dev_ptr = thrust::min_element(thrust::device, dev_v1, dev_v1+n);
double *ret_ptr = thrust::raw_pointer_cast(ret_dev_ptr);
double *ret_host = new double[1];
cudaError_t cuerr = cudaMemcpy(ret_host, ret_ptr, (1)*sizeof(double), cudaMemcpyDeviceToHost);
double rv = ret_host[0];
delete [] ret_host;
return rv;
}
/** @brief Checks if selected elements of `this` are positive */
int all_positive_w_pattern_kernel(int n, const double* d1, const double* id)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
hiop::cuda::is_posive_w_pattern_kernel(n, dv_ptr, d1, id);
return thrust::reduce(thrust::device, v_temp.begin(), v_temp.end(), (int)0, thrust::plus<int>());
}
/** @brief compute min(d1) for selected elements*/
double min_w_pattern_kernel(int n, const double* d1, const double* id, double max_val)
{
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
double* d_ptr = thrust::raw_pointer_cast(dv_ptr);
// set value with pattern
hiop::cuda::set_val_w_pattern_kernel(n, d_ptr, d1, id, max_val);
thrust::device_ptr<double> ret_dev_ptr = thrust::min_element(thrust::device, dv_ptr, dv_ptr+n);
// TODO: how to return double from device to host?
double *ret_host = new double[1];
double *ret_ptr = thrust::raw_pointer_cast(ret_dev_ptr);
cudaError_t cuerr = cudaMemcpy(ret_host, ret_ptr, (1)*sizeof(double), cudaMemcpyDeviceToHost);
double ret_v = ret_host[0];
delete [] ret_host;
thrust::device_free(dv_ptr);
return ret_v;
}
/** @brief check if xld[i] < xud[i] forall i */
bool check_bounds_kernel(int n, const double* xld, const double* xud)
{
// Check that every upper bound is strictly larger than the corresponding lower bound
thrust::minus<double> minus_op;
thrust::device_ptr<double> dev_xud = thrust::device_pointer_cast(const_cast<double*>(xud));
thrust::device_ptr<double> dev_xld = thrust::device_pointer_cast(const_cast<double*>(xld));
// TODO: how to avoid this temp vec?
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
thrust::transform(thrust::device,
dev_xud, dev_xud+n,
dev_xld, dv_ptr,
minus_op);
int res_offset = thrust::min_element(thrust::device, dv_ptr, dv_ptr + n) - dv_ptr;
double ret_v = *(dv_ptr + res_offset);
bool bval = (ret_v > 0.0);
thrust::device_free(dv_ptr);
return bval;
}
/** @brief compute max{a\in(0,1]| x+ad >=(1-tau)x} */
double min_frac_to_bds_kernel(int n, const double* xd, const double* dd, double tau)
{
thrust::device_ptr<double> dv_ptr = thrust::device_malloc(n*sizeof(double));
double* d_ptr = thrust::raw_pointer_cast(dv_ptr);
// set values
hiop::cuda::fraction_to_the_boundry_kernel(n, d_ptr, xd, dd, tau);
int res_offset = thrust::min_element(thrust::device, dv_ptr, dv_ptr+n) - dv_ptr;
double alpha = *(dv_ptr + res_offset);
thrust::device_free(dv_ptr);
return alpha;
}
/** @brief max{a\in(0,1]| x+ad >=(1-tau)x} with pattern id */
double min_frac_to_bds_w_pattern_kernel(int n,
const double* xd,
const double* dd,
const double* id,
double tau)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<double> v_temp(n);
double* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// set value with pattern
hiop::cuda::fraction_to_the_boundry_w_pattern_kernel(n, dv_ptr, xd, dd, id, tau);
double alpha = *(thrust::min_element(thrust::device, v_temp.begin(), v_temp.end()));
return alpha;
}
/** @brief Checks if `xd` matches nonzero pattern of `id`. */
bool match_pattern_kernel(int n, const double* xd, const double* id)
{
// TODO: how to avoid this temp vec?
thrust::device_vector<int> v_temp(n);
int* dv_ptr = thrust::raw_pointer_cast(v_temp.data());
// check if xd matches the pattern given by id
hiop::cuda::component_match_pattern_kernel(n, dv_ptr, xd, id);
thrust_istrue istrue_op;
return thrust::all_of(thrust::device, v_temp.begin(), v_temp.end(), istrue_op);
}
/** @brief Checks if all x[i] = 0 */
bool is_zero_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_iszero<double> iszero_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::all_of(thrust::device, dev_v, dev_v+n, iszero_op);
}
/** @brief Checks if any x[i] = nan */
bool isnan_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isnan<double> isnan_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::any_of(thrust::device, dev_v, dev_v+n, isnan_op);
}
/** @brief Checks if any x[i] = inf */
bool isinf_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isinf<double> isinf_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::any_of(thrust::device, dev_v, dev_v+n, isinf_op);
}
/** @brief Checks if all x[i] are finite */
bool isfinite_kernel(int n, double* xd)
{
// wrap raw pointer with a device_ptr
thrust_isfinite<double> isfinite_op;
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
return thrust::all_of(thrust::device, dev_v, dev_v+n, isfinite_op);
}
/// @brief get number of values that are less than the given value 'val'.
int num_of_elem_less_than_kernel(int n, double* xd, double val)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
int rval = thrust::transform_reduce(thrust::device, dev_v, dev_v+n, thrust_less(val), (int) 0, thrust::plus<int>());
return rval;
}
/// @brief get the number of values whose absolute value is less than the given value 'val'.
int num_of_elem_absless_than_kernel(int n, double* xd, double val)
{
thrust::device_ptr<double> dev_v = thrust::device_pointer_cast(xd);
int rval = thrust::transform_reduce(thrust::device, dev_v, dev_v+n, thrust_abs_less(val), (int) 0, thrust::plus<int>());
return rval;
}
/// @brief Copy the entries in 'dd' where corresponding 'ix' is nonzero, to vd starting at start_index_in_dest.
void copyToStartingAt_w_pattern_kernel(int n_src,
int n_dest,
int start_index_in_dest,
int* nnz_cumsum,
double *vd,
const double* dd)
{
int num_blocks = (n_src+block_size-1)/block_size;
copyToStartingAt_w_pattern_cu<<<num_blocks,block_size>>>(n_src,
n_dest,
start_index_in_dest,
nnz_cumsum,
vd,
dd);
}
/// for hiopVectorIntCuda
/**
* @brief Set the vector entries to a linear space starting at i0, containing evenly
* incremented integers up to i0+(n-1)*di, where n is the length of this vector
*/
void set_to_linspace_kernel(int sz, int* buf, int i0, int di)
{
int num_blocks = (sz+block_size-1)/block_size;
set_to_linspace_cu<<<num_blocks,block_size>>>(sz, buf, i0, di);
}
/** @brief compute the cumulative sum (cumsum) of the given pattern */
void compute_cusum_kernel(int sz, int* buf, const double* id)
{
int num_blocks = (sz+block_size-1)/block_size;
compute_cusum_cu<<<num_blocks,block_size>>>(sz, buf, id);
thrust::device_ptr<int> dev_v = thrust::device_pointer_cast(buf);
thrust::inclusive_scan(dev_v, dev_v + sz, dev_v); // in-place scan
}
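// A minimal usage sketch of the two helpers above (buffer names and sizes are illustrative,
// not part of the original API; the caller is assumed to have device-allocated them):
// compute_cusum_kernel(n + 1, cumsum_dev, pattern_dev); // cumsum_dev holds n+1 ints
// copyToStartingAt_w_pattern_kernel(n, n_dest, 0, cumsum_dev, dest_dev, src_dev);
// which compacts the entries of src_dev selected by pattern_dev into the front of dest_dev.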
}
}
|
37adeb77286be854edbd3574e7b66997e01e466e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SoftmaxExecution.hpp"
#include "core/TensorUtils.hpp"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void SOFTMAX(const T *input, T *output,
const int inside,
const int axis,
const int outside,
const int count
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* src = input + y * axis * inside + x;
T* dst = output + y * axis * inside + x;
float maxValue = (float)src[0];
for (int z=1; z<axis; ++z) {
maxValue = max(maxValue, src[z * inside]);
}
float sumValue = 0.0;
for (int z=0; z<axis; ++z) {
float tmpSub = (float)src[z * inside] - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
sumValue = sumValue + exp(tmpSub);
}
sumValue = 1.0 / sumValue;
for (int z=0; z<axis; ++z) {
float tmpSub = (float)src[z * inside] - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
dst[z*inside] = (T)(exp(tmpSub) * sumValue);
}
}
}
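// The kernel above evaluates the numerically stable softmax along the `axis` dimension:
// dst[z] = exp(src[z] - max_j src[j]) / sum_j exp(src[j] - max_j src[j]);
// the exponent is clamped at -87 (roughly log(FLT_MIN)) so exp() stays representable in fp32.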
template <typename T>
__global__ void SOFTMAX_WARP_32(const T *input, T *output,
const int inside,
const int axis,
const int outside,
const int count
) {
int idx_outside = blockIdx.x / inside;
int idx_inside = blockIdx.x - idx_outside * inside;
auto src = input + idx_outside * axis * inside + idx_inside;
float local_src = -FLT_MAX;
__shared__ float maxValue;
__shared__ float sumValue;
int tid = threadIdx.x;
if(tid < axis) {
local_src = (float)(src[tid * inside]);
}
float maxRes = warpReduceMax<float>(local_src);
if(tid == 0)
maxValue = maxRes;
__syncthreads();
float local_exp = 0.0f;
if(tid < axis) {
float tmpSub = local_src - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
local_exp = exp(tmpSub);
}
float sumRes = warpReduceSum<float>(local_exp);
if(tid == 0)
sumValue = sumRes;
__syncthreads();
float divSumValue = 1.0 / sumValue;
if(tid < axis) {
output[(idx_outside * axis + tid) * inside + idx_inside] = (T)(local_exp * divSumValue);
}
}
template <typename T>
__global__ void SOFTMAX_AXIS_REDUCE(const T *input, T *output,
const int inside,
const int axis,
const int per_block_size,
const int calc_multi_num,
const int outside,
const int count
) {
int idx_outside = blockIdx.x / inside;
int idx_inside = blockIdx.x - idx_outside * inside;
auto src = input + idx_outside * axis * inside + idx_inside;
auto dst = output + idx_outside * axis * inside + idx_inside;
float local_src = -FLT_MAX;
__shared__ float maxValue;
__shared__ float sumValue;
int tid = threadIdx.x;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
local_src = max(local_src, (float)(src[(tid + i * per_block_size) * inside]));
}
}
float maxRes = blockReduceMax<float>(local_src);
if(tid == 0)
maxValue = maxRes;
__syncthreads();
float local_exp = 0.0f;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
float tmpSub = (float)(src[(tid + i * per_block_size) * inside]) - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
local_exp += exp(tmpSub);
}
}
float sumRes = blockReduceSum<float>(local_exp);
if(tid == 0)
sumValue = sumRes;
__syncthreads();
float divSumValue = 1.0 / sumValue;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
float tmpSub = (float)(src[(tid + i * per_block_size) * inside]) - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
float tmp_exp = exp(tmpSub);
dst[(tid + i * per_block_size) * inside] = (T)(tmp_exp * divSumValue);
}
}
}
SoftmaxExecution::SoftmaxExecution(int axis, Backend *backend) : Execution(backend) {
mAxis = axis;
}
SoftmaxExecution::~SoftmaxExecution() {
//
}
ErrorCode SoftmaxExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = inputs[0];
const int dimensions = input->buffer().dimensions;
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int axis = mAxis;
if (axis < 0) {
axis += dimensions;
}
const auto layout = TensorUtils::getDescribe(input)->dimensionFormat;
mNeedUnpackC4 = layout == MNN_DATA_FORMAT_NC4HW4;
if (mNeedUnpackC4) {
TensorUtils::copyShape(input, &mStorage);
TensorUtils::getDescribe(&mStorage)->dimensionFormat = MNN_DATA_FORMAT_NCHW;
mStorage.buffer().dimensions = dimensions;
mStorage.buffer().type = input->getType();
backend()->onAcquireBuffer(&mStorage, Backend::DYNAMIC);
}
int inside = 1;
int outside = 1;
int dims = input->buffer().dimensions;
for (int i = 0; i < axis; ++i) {
outside *= input->length(i);
}
for (int i = axis + 1; i < dims; ++i) {
inside *= input->length(i);
}
if (mNeedUnpackC4) {
backend()->onReleaseBuffer(&mStorage, Backend::DYNAMIC);
}
mCpuParam.inside = inside;
mCpuParam.outside = outside;
mCpuParam.axis = input->length(axis);
// printf("\nsoftmax:%d-%d-%d, %d-%d\n", mCpuParam.inside, mCpuParam.outside, mCpuParam.axis, mNeedUnpackC4, axis);
return NO_ERROR;
}
ErrorCode SoftmaxExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto dst = output;
if (mNeedUnpackC4) {
backend()->onCopyBuffer(inputs[0], &mStorage);
input = (void*)mStorage.deviceId();
dst = (void*)mStorage.deviceId();
}
//MNN_PRINT("softmax input dims:%d, size:%d-%d-%d-%d\n", inputs[0]->dimensions(), inputs[0]->batch(), inputs[0]->height(), inputs[0]->width(), inputs[0]->channel());
//MNN_PRINT("softmax storage dims:%d, size:%d-%d-%d-%d\n", mStorage.dimensions(), mStorage.batch(), mStorage.height(), mStorage.width(), mStorage.channel());
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = mCpuParam.inside;
int outside = mCpuParam.outside;
int axis = mCpuParam.axis;
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
if (static_cast<CUDABackend*>(backend())->useFp16()) {
if(axis % 256 == 0 || axis >= 768) {
block_num = count;
int calc_multi_num = (axis + 255) / 256;
hipLaunchKernelGGL(( SOFTMAX_AXIS_REDUCE), dim3(block_num), dim3(256), 0, 0, (const half*)input, (half*)dst, inside, axis, 256, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis % 64 == 0 || axis > 32) {
block_num = count;
int calc_multi_num = (axis + 63) / 64;
hipLaunchKernelGGL(( SOFTMAX_AXIS_REDUCE), dim3(block_num), dim3(64), 0, 0, (const half*)input, (half*)dst, inside, axis, 64, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis <= 32) {
threads_num = 32;
block_num = count;
hipLaunchKernelGGL(( SOFTMAX_WARP_32), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)dst, inside, axis, outside, count);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( SOFTMAX), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)dst, inside, axis, outside, count);
checkKernelErrors;
}
} else {
if(axis % 256 == 0 || axis >= 768) {
block_num = count;
int calc_multi_num = (axis + 255) / 256;
hipLaunchKernelGGL(( SOFTMAX_AXIS_REDUCE), dim3(block_num), dim3(256), 0, 0, (const float*)input, (float*)dst, inside, axis, 256, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis % 64 == 0 || axis > 32) {
block_num = count;
int calc_multi_num = (axis + 63) / 64;
hipLaunchKernelGGL(( SOFTMAX_AXIS_REDUCE), dim3(block_num), dim3(64), 0, 0, (const float*)input, (float*)dst, inside, axis, 64, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis <= 32) {
block_num = count;
threads_num = 32;
hipLaunchKernelGGL(( SOFTMAX_WARP_32), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)dst, inside, axis, outside, count);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( SOFTMAX), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)dst, inside, axis, outside, count);
checkKernelErrors;
}
}
if (mNeedUnpackC4) {
backend()->onCopyBuffer(&mStorage, outputs[0]);
}
return NO_ERROR;
}
class SoftmaxCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.code != halide_type_float) {
MNN_PRINT("softmax data type:%d not supported", type.code);
return nullptr;
}
auto axis = op->main_as_Axis()->axis();
return new SoftmaxExecution(axis, backend);
}
};
static CUDACreatorRegister<SoftmaxCreator> __init(OpType_Softmax);
}
}
| 37adeb77286be854edbd3574e7b66997e01e466e.cu | #include "SoftmaxExecution.hpp"
#include "core/TensorUtils.hpp"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void SOFTMAX(const T *input, T *output,
const int inside,
const int axis,
const int outside,
const int count
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* src = input + y * axis * inside + x;
T* dst = output + y * axis * inside + x;
float maxValue = (float)src[0];
for (int z=1; z<axis; ++z) {
maxValue = max(maxValue, src[z * inside]);
}
float sumValue = 0.0;
for (int z=0; z<axis; ++z) {
float tmpSub = (float)src[z * inside] - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
sumValue = sumValue + exp(tmpSub);
}
sumValue = 1.0 / sumValue;
for (int z=0; z<axis; ++z) {
float tmpSub = (float)src[z * inside] - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
dst[z*inside] = (T)(exp(tmpSub) * sumValue);
}
}
}
template <typename T>
__global__ void SOFTMAX_WARP_32(const T *input, T *output,
const int inside,
const int axis,
const int outside,
const int count
) {
int idx_outside = blockIdx.x / inside;
int idx_inside = blockIdx.x - idx_outside * inside;
auto src = input + idx_outside * axis * inside + idx_inside;
float local_src = -FLT_MAX;
__shared__ float maxValue;
__shared__ float sumValue;
int tid = threadIdx.x;
if(tid < axis) {
local_src = (float)(src[tid * inside]);
}
float maxRes = warpReduceMax<float>(local_src);
if(tid == 0)
maxValue = maxRes;
__syncthreads();
float local_exp = 0.0f;
if(tid < axis) {
float tmpSub = local_src - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
local_exp = exp(tmpSub);
}
float sumRes = warpReduceSum<float>(local_exp);
if(tid == 0)
sumValue = sumRes;
__syncthreads();
float divSumValue = 1.0 / sumValue;
if(tid < axis) {
output[(idx_outside * axis + tid) * inside + idx_inside] = (T)(local_exp * divSumValue);
}
}
template <typename T>
__global__ void SOFTMAX_AXIS_REDUCE(const T *input, T *output,
const int inside,
const int axis,
const int per_block_size,
const int calc_multi_num,
const int outside,
const int count
) {
int idx_outside = blockIdx.x / inside;
int idx_inside = blockIdx.x - idx_outside * inside;
auto src = input + idx_outside * axis * inside + idx_inside;
auto dst = output + idx_outside * axis * inside + idx_inside;
float local_src = -FLT_MAX;
__shared__ float maxValue;
__shared__ float sumValue;
int tid = threadIdx.x;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
local_src = max(local_src, (float)(src[(tid + i * per_block_size) * inside]));
}
}
float maxRes = blockReduceMax<float>(local_src);
if(tid == 0)
maxValue = maxRes;
__syncthreads();
float local_exp = 0.0f;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
float tmpSub = (float)(src[(tid + i * per_block_size) * inside]) - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
local_exp += exp(tmpSub);
}
}
float sumRes = blockReduceSum<float>(local_exp);
if(tid == 0)
sumValue = sumRes;
__syncthreads();
float divSumValue = 1.0 / sumValue;
for(int i=0; i<calc_multi_num; i++) {
if(tid + i * per_block_size < axis) {
float tmpSub = (float)(src[(tid + i * per_block_size) * inside]) - maxValue;
// EXP CUTOFF
tmpSub = ((tmpSub < -87.0) ? -87.0 : tmpSub);
float tmp_exp = exp(tmpSub);
dst[(tid + i * per_block_size) * inside] = (T)(tmp_exp * divSumValue);
}
}
}
SoftmaxExecution::SoftmaxExecution(int axis, Backend *backend) : Execution(backend) {
mAxis = axis;
}
SoftmaxExecution::~SoftmaxExecution() {
//
}
ErrorCode SoftmaxExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = inputs[0];
const int dimensions = input->buffer().dimensions;
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int axis = mAxis;
if (axis < 0) {
axis += dimensions;
}
const auto layout = TensorUtils::getDescribe(input)->dimensionFormat;
mNeedUnpackC4 = layout == MNN_DATA_FORMAT_NC4HW4;
if (mNeedUnpackC4) {
TensorUtils::copyShape(input, &mStorage);
TensorUtils::getDescribe(&mStorage)->dimensionFormat = MNN_DATA_FORMAT_NCHW;
mStorage.buffer().dimensions = dimensions;
mStorage.buffer().type = input->getType();
backend()->onAcquireBuffer(&mStorage, Backend::DYNAMIC);
}
int inside = 1;
int outside = 1;
int dims = input->buffer().dimensions;
for (int i = 0; i < axis; ++i) {
outside *= input->length(i);
}
for (int i = axis + 1; i < dims; ++i) {
inside *= input->length(i);
}
if (mNeedUnpackC4) {
backend()->onReleaseBuffer(&mStorage, Backend::DYNAMIC);
}
mCpuParam.inside = inside;
mCpuParam.outside = outside;
mCpuParam.axis = input->length(axis);
// printf("\nsoftmax:%d-%d-%d, %d-%d\n", mCpuParam.inside, mCpuParam.outside, mCpuParam.axis, mNeedUnpackC4, axis);
return NO_ERROR;
}
ErrorCode SoftmaxExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto dst = output;
if (mNeedUnpackC4) {
backend()->onCopyBuffer(inputs[0], &mStorage);
input = (void*)mStorage.deviceId();
dst = (void*)mStorage.deviceId();
}
//MNN_PRINT("softmax input dims:%d, size:%d-%d-%d-%d\n", inputs[0]->dimensions(), inputs[0]->batch(), inputs[0]->height(), inputs[0]->width(), inputs[0]->channel());
//MNN_PRINT("softmax storage dims:%d, size:%d-%d-%d-%d\n", mStorage.dimensions(), mStorage.batch(), mStorage.height(), mStorage.width(), mStorage.channel());
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = mCpuParam.inside;
int outside = mCpuParam.outside;
int axis = mCpuParam.axis;
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
if (static_cast<CUDABackend*>(backend())->useFp16()) {
if(axis % 256 == 0 || axis >= 768) {
block_num = count;
int calc_multi_num = (axis + 255) / 256;
SOFTMAX_AXIS_REDUCE<<<block_num, 256>>>((const half*)input, (half*)dst, inside, axis, 256, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis % 64 == 0 || axis > 32) {
block_num = count;
int calc_multi_num = (axis + 63) / 64;
SOFTMAX_AXIS_REDUCE<<<block_num, 64>>>((const half*)input, (half*)dst, inside, axis, 64, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis <= 32) {
threads_num = 32;
block_num = count;
SOFTMAX_WARP_32<<<block_num, threads_num>>>((const half*)input, (half*)dst, inside, axis, outside, count);
checkKernelErrors;
} else {
SOFTMAX<<<block_num, threads_num>>>((const half*)input, (half*)dst, inside, axis, outside, count);
checkKernelErrors;
}
} else {
if(axis % 256 == 0 || axis >= 768) {
block_num = count;
int calc_multi_num = (axis + 255) / 256;
SOFTMAX_AXIS_REDUCE<<<block_num, 256>>>((const float*)input, (float*)dst, inside, axis, 256, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis % 64 == 0 || axis > 32) {
block_num = count;
int calc_multi_num = (axis + 63) / 64;
SOFTMAX_AXIS_REDUCE<<<block_num, 64>>>((const float*)input, (float*)dst, inside, axis, 64, calc_multi_num, outside, count);
checkKernelErrors;
} else if(axis <= 32) {
block_num = count;
threads_num = 32;
SOFTMAX_WARP_32<<<block_num, threads_num>>>((const float*)input, (float*)dst, inside, axis, outside, count);
checkKernelErrors;
} else {
SOFTMAX<<<block_num, threads_num>>>((const float*)input, (float*)dst, inside, axis, outside, count);
checkKernelErrors;
}
}
if (mNeedUnpackC4) {
backend()->onCopyBuffer(&mStorage, outputs[0]);
}
return NO_ERROR;
}
class SoftmaxCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.code != halide_type_float) {
MNN_PRINT("softmax data type:%d not supported", type.code);
return nullptr;
}
auto axis = op->main_as_Axis()->axis();
return new SoftmaxExecution(axis, backend);
}
};
static CUDACreatorRegister<SoftmaxCreator> __init(OpType_Softmax);
}
}
|
d49d527005a85b112fb9bbe1371f563856a2119c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstring>
#include <algorithm>
#include <cudnn.h>
#include "common.h"
/**
* Compute the gradient of the convolution w.r.t. the input data.
*/
void ConvolutionBackwardDataHost(float *dy, int batch_size, int c_out, int h_out, int w_out,
float *w, int c_in, int kernel_h, int kernel_w, int stride_h, int stride_w, float *dx,
int h_in, int w_in) {
// Set the output to zero so we can accumulate
memset(dx, 0, batch_size*c_in*h_in*w_in*sizeof(float));
for (int i = 0; i < batch_size; ++i) {
for (int m = 0; m < c_in; ++m) {
for (int p = 0; p < h_in; ++p) {
for (int q = 0; q < w_in; ++q) {
// cout << "calculating index: " << p << ", " << q << endl;
int k_start = max(0, (int)floor((p - kernel_h) / float(stride_h)) + 1);
int k_end = min(h_out, p / stride_h + 1);
int l_start = max(0, (int)floor((q - kernel_w) / float(stride_w)) + 1);
int l_end = min(w_out, q / stride_w + 1);
// cout << "iter on k: " << k_start << " -> " << k_end << endl;
// cout << "iter on l: " << l_start << " -> " << l_end << endl;
// compute the gradient of this input pixel
for (int j = 0; j < c_out; ++j) {
for (int k = k_start; k < k_end; ++k) {
for (int l = l_start; l < l_end; ++l) {
// cout << "accumulating: " <<
// dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l];
// cout << " * " <<
// w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
// (p - (k*stride_h))*kernel_w + (q - (l * stride_w))]
// << " = " << dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
// w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
// (p - (k*stride_h))*kernel_w + (q - (l * stride_w))] << endl;
// cout << "filter index: " << p - (k*stride_h) << ", " << q - (l*stride_w) << endl;
dx[i*c_in*h_in*w_in + m*h_in*w_in + p*w_in + q] +=
dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
(p - (k*stride_h))*kernel_w + (q - (l * stride_w))];
}
}
}
}
}
}
}
}
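// The loops above implement the data gradient of a strided, unpadded cross-correlation:
// dx[i,m,p,q] = sum_{j,k,l} dy[i,j,k,l] * w[j,m,p-k*stride_h,q-l*stride_w],
// where (k,l) ranges over the output positions whose receptive field covers input pixel (p,q),
// i.e. 0 <= p-k*stride_h < kernel_h and 0 <= q-l*stride_w < kernel_w.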
__global__ void ConvolutionBackwardDataKernel(float *dy, int batch_size, int c_out,
int h_out, int w_out, float *w, int c_in, int kernel_h, int kernel_w, int stride_h,
int stride_w, float *dx, int h_in, int w_in) {
int i = blockIdx.x; // sample index
int m = blockIdx.y; // channel_in index
for (int p = threadIdx.y; p < h_in; p += blockDim.y) {
for (int q = threadIdx.x; q < w_in; q += blockDim.x) {
int k_start = max(0, (int)floor((p - kernel_h) / float(stride_h)) + 1);
int k_end = min(h_out, p / stride_h + 1);
int l_start = max(0, (int)floor((q - kernel_w) / float(stride_w)) + 1);
int l_end = min(w_out, q / stride_w + 1);
// compute the gradient of this input pixel
for (int j = 0; j < c_out; ++j) {
for (int k = k_start; k < k_end; ++k) {
for (int l = l_start; l < l_end; ++l) {
dx[i*c_in*h_in*w_in + m*h_in*w_in + p*w_in + q] +=
dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
(p - (k*stride_h))*kernel_w + (q - (l * stride_w))];
}
}
}
}
}
}
void ConvolutionBackwardData(float *dy, int batch_size, int c_out, int h_out, int w_out,
float *w, int c_in, int kernel_h, int kernel_w, int stride_h, int stride_w, float *dx,
int h_in, int w_in, hipStream_t stream) {
CUDA_CALL(hipMemsetAsync(dx, 0, batch_size*c_in*h_in*w_in*sizeof(float), stream));
hipLaunchKernelGGL(( ConvolutionBackwardDataKernel), dim3(dim3(batch_size, c_in)), dim3(dim3(32, 32)), 0, stream,
dy, batch_size, c_out, h_out, w_out, w, c_in, kernel_h, kernel_w, stride_h, stride_w,
dx, h_in, w_in);
}
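// Launch configuration used above: one thread block per (sample, input-channel) pair, with a
// 32x32 thread tile striding over the spatial positions of dx, mirroring the host loops.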
int main() {
cudnnHandle_t handle;
CUDNN_CALL(cudnnCreate(&handle));
hipStream_t stream;
CUDA_CALL(hipStreamCreate(&stream));
CUDNN_CALL(cudnnSetStream(handle, stream));
//
/// Set dimensions for the convolution
//
// Kernel dims - we don't support padding in this example
int kernel_h = 3;
int kernel_w = 3;
int pad_h = 0;
int pad_w = 0;
int stride_h = 1;
int stride_w = 1;
// Input dims
int n = 32;
int h_in = 227;
int w_in = 227;
int c_in = 3;
// Output dims
int h_out = (h_in + 2*pad_h - kernel_h) / stride_h + 1;
int w_out = (w_in + 2*pad_w - kernel_w) / stride_w + 1;
int c_out = 32;
//
/// Setup data & filters for the convolution
//
int filter_size = c_out*c_in*kernel_h*kernel_w;
int image_grad_size = n*c_in*h_in*w_in;
int output_grad_size = n*c_out*h_out*w_out;
float *filters = new float[filter_size];
float *image_grad = new float[image_grad_size];
float *output_grad = new float[output_grad_size];
SetIncremental(filters, filter_size);
SetIncremental(output_grad, output_grad_size);
#ifdef DEBUG
cout << "Filters: ";
PrintTensor(filters, c_out, c_in, kernel_h, kernel_w);
cout << "Output Grad: ";
PrintTensor(output_grad, n, c_out, h_out, w_out);
#endif
// Setup device version of input, output, and filters
float *filters_on_device, *image_grad_on_device, *output_grad_on_device;
CUDA_CALL(hipMalloc(&filters_on_device, filter_size*sizeof(float)));
CUDA_CALL(hipMalloc(&image_grad_on_device, image_grad_size*sizeof(float)));
CUDA_CALL(hipMalloc(&output_grad_on_device, output_grad_size*sizeof(float)));
CUDA_CALL(hipMemcpy(
filters_on_device,
filters, filter_size*sizeof(float),
hipMemcpyHostToDevice
));
CUDA_CALL(hipMemcpy(
output_grad_on_device,
output_grad, output_grad_size*sizeof(float),
hipMemcpyHostToDevice
));
//
/// Setup parameters for cudnn call
//
// Setup alpha/beta
float alpha = 1.f, beta = 0.f;
// Setup input and output grad tensor descriptors
cudnnTensorDescriptor_t dy_desc, dx_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(dy_desc,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
n, c_out, h_out, w_out));
CUDNN_CALL(cudnnSetTensor4dDescriptor(dx_desc,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
n, c_in, h_in, w_in));
// Setup filter descriptor
cudnnFilterDescriptor_t w_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(w_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
c_out, c_in, kernel_h, kernel_w));
// Setup convolution meta-data
cudnnConvolutionDescriptor_t conv_desc;
cudnnConvolutionBwdDataAlgo_t conv_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc,
pad_h, pad_w, stride_h, stride_w, 1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
// Setup & allocate workspace
size_t workspace_size = 0;
void *workspace_on_device;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(handle, w_desc,
dy_desc, conv_desc, dx_desc, conv_algo, &workspace_size));
CUDA_CALL(hipMalloc(&workspace_on_device, workspace_size));
// Run the backward convolution w.r.t. the data
auto t1 = GetTime();
CUDNN_CALL(cudnnConvolutionBackwardData(
handle,
&alpha,
w_desc,
filters_on_device,
dy_desc,
output_grad_on_device,
conv_desc,
conv_algo,
workspace_on_device,
workspace_size,
&beta,
dx_desc,
image_grad_on_device
));
CUDA_CALL(hipStreamSynchronize(stream));
float total_seconds = ElapsedTime(t1, GetTime());
cout << "CUDNN FPS: " << n / total_seconds << endl;
#ifdef DEBUG
cout << "Device Data Grad:";
PrintTensor(image_grad_on_device, n, c_in, h_in, w_in);
#endif
// Do the host-side backward convolution w.r.t. the data
t1 = GetTime();
ConvolutionBackwardDataHost(
output_grad,
n,
c_out,
h_out,
w_out,
filters,
c_in,
kernel_h,
kernel_w,
stride_h,
stride_w,
image_grad,
h_in,
w_in
);
total_seconds = ElapsedTime(t1, GetTime());
cout << "Host FPS: " << n / total_seconds << endl;
#ifdef DEBUG
cout << "Data Grad: ";
PrintTensor(image_grad, n, c_in, h_in, w_in);
#endif
// Verify the results
VerifyResults(image_grad, image_grad_on_device, n*c_in*h_in*w_in);
// Do the device-side backward convolution w.r.t. the data
t1 = GetTime();
ConvolutionBackwardData(
output_grad_on_device,
n,
c_out,
h_out,
w_out,
filters_on_device,
c_in,
kernel_h,
kernel_w,
stride_h,
stride_w,
image_grad_on_device,
h_in,
w_in,
stream
);
CUDA_CALL(hipStreamSynchronize(stream));
total_seconds = ElapsedTime(t1, GetTime());
cout << "Device FPS: " << n / total_seconds << endl;
// Verify the results
VerifyResults(image_grad, image_grad_on_device, n*c_in*h_in*w_in);
// clean up
delete[] filters;
delete[] image_grad;
delete[] output_grad;
CUDA_CALL(hipFree(workspace_on_device));
CUDA_CALL(hipFree(filters_on_device));
CUDA_CALL(hipFree(image_grad_on_device));
CUDA_CALL(hipFree(output_grad_on_device));
CUDNN_CALL(cudnnDestroy(handle));
}
| d49d527005a85b112fb9bbe1371f563856a2119c.cu | #include <cmath>
#include <cstring>
#include <algorithm>
#include <cudnn.h>
#include "common.h"
/**
* Compute the gradient of the convolution w.r.t. the input data.
*/
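// For reference, the loops below accumulate, for each input pixel,
//   dx[i,m,p,q] += dy[i,j,k,l] * w[j,m, p - k*stride_h, q - l*stride_w]
// over every output channel j and every output position (k,l) whose window covers (p,q).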
void ConvolutionBackwardDataHost(float *dy, int batch_size, int c_out, int h_out, int w_out,
float *w, int c_in, int kernel_h, int kernel_w, int stride_h, int stride_w, float *dx,
int h_in, int w_in) {
// Set the output to zero so we can accumulate
memset(dx, 0, batch_size*c_in*h_in*w_in*sizeof(float));
for (int i = 0; i < batch_size; ++i) {
for (int m = 0; m < c_in; ++m) {
for (int p = 0; p < h_in; ++p) {
for (int q = 0; q < w_in; ++q) {
// cout << "calculating index: " << p << ", " << q << endl;
int k_start = max(0, (int)floor((p - kernel_h) / float(stride_h)) + 1);
int k_end = min(h_out, p / stride_h + 1);
int l_start = max(0, (int)floor((q - kernel_w) / float(stride_w)) + 1);
int l_end = min(w_out, q / stride_w + 1);
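// These bounds select exactly the output positions whose window covers (p,q):
// k must satisfy 0 <= p - k*stride_h < kernel_h, and likewise l for q.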
// cout << "iter on k: " << k_start << " -> " << k_end << endl;
// cout << "iter on l: " << l_start << " -> " << l_end << endl;
// compute the gradient of this input pixel
for (int j = 0; j < c_out; ++j) {
for (int k = k_start; k < k_end; ++k) {
for (int l = l_start; l < l_end; ++l) {
// cout << "accumulating: " <<
// dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l];
// cout << " * " <<
// w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
// (p - (k*stride_h))*kernel_w + (q - (l * stride_w))]
// << " = " << dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
// w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
// (p - (k*stride_h))*kernel_w + (q - (l * stride_w))] << endl;
// cout << "filter index: " << p - (k*stride_h) << ", " << q - (l*stride_w) << endl;
dx[i*c_in*h_in*w_in + m*h_in*w_in + p*w_in + q] +=
dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
(p - (k*stride_h))*kernel_w + (q - (l * stride_w))];
}
}
}
}
}
}
}
}
__global__ void ConvolutionBackwardDataKernel(float *dy, int batch_size, int c_out,
int h_out, int w_out, float *w, int c_in, int kernel_h, int kernel_w, int stride_h,
int stride_w, float *dx, int h_in, int w_in) {
int i = blockIdx.x; // sample index
int m = blockIdx.y; // channel_in index
for (int p = threadIdx.y; p < h_in; p += blockDim.y) {
for (int q = threadIdx.x; q < w_in; q += blockDim.x) {
int k_start = max(0, (int)floor((p - kernel_h) / float(stride_h)) + 1);
int k_end = min(h_out, p / stride_h + 1);
int l_start = max(0, (int)floor((q - kernel_w) / float(stride_w)) + 1);
int l_end = min(w_out, q / stride_w + 1);
// compute the gradient of this input pixel
for (int j = 0; j < c_out; ++j) {
for (int k = k_start; k < k_end; ++k) {
for (int l = l_start; l < l_end; ++l) {
dx[i*c_in*h_in*w_in + m*h_in*w_in + p*w_in + q] +=
dy[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] *
w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w +
(p - (k*stride_h))*kernel_w + (q - (l * stride_w))];
}
}
}
}
}
}
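/* Host-side launch wrapper below: one thread block per (sample, input-channel) pair;
 * each 32x32 block strides over the full h_in x w_in plane of that channel. */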
void ConvolutionBackwardData(float *dy, int batch_size, int c_out, int h_out, int w_out,
float *w, int c_in, int kernel_h, int kernel_w, int stride_h, int stride_w, float *dx,
int h_in, int w_in, cudaStream_t stream) {
CUDA_CALL(cudaMemsetAsync(dx, 0, batch_size*c_in*h_in*w_in*sizeof(float), stream));
ConvolutionBackwardDataKernel<<<dim3(batch_size, c_in), dim3(32, 32), 0, stream>>>(
dy, batch_size, c_out, h_out, w_out, w, c_in, kernel_h, kernel_w, stride_h, stride_w,
dx, h_in, w_in);
}
int main() {
cudnnHandle_t handle;
CUDNN_CALL(cudnnCreate(&handle));
cudaStream_t stream;
CUDA_CALL(cudaStreamCreate(&stream));
CUDNN_CALL(cudnnSetStream(handle, stream));
//
/// Set dimensions for the convolution
//
// Kernel dims - we don't support padding in this example
int kernel_h = 3;
int kernel_w = 3;
int pad_h = 0;
int pad_w = 0;
int stride_h = 1;
int stride_w = 1;
// Input dims
int n = 32;
int h_in = 227;
int w_in = 227;
int c_in = 3;
// Output dims
int h_out = (h_in + 2*pad_h - kernel_h) / stride_h + 1;
int w_out = (w_in + 2*pad_w - kernel_w) / stride_w + 1;
int c_out = 32;
//
/// Setup data & filters for the convolution
//
int filter_size = c_out*c_in*kernel_h*kernel_w;
int image_grad_size = n*c_in*h_in*w_in;
int output_grad_size = n*c_out*h_out*w_out;
float *filters = new float[filter_size];
float *image_grad = new float[image_grad_size];
float *output_grad = new float[output_grad_size];
SetIncremental(filters, filter_size);
SetIncremental(output_grad, output_grad_size);
#ifdef DEBUG
cout << "Filters: ";
PrintTensor(filters, c_out, c_in, kernel_h, kernel_w);
cout << "Output Grad: ";
PrintTensor(output_grad, n, c_out, h_out, w_out);
#endif
// Setup device version of input, output, and filters
float *filters_on_device, *image_grad_on_device, *output_grad_on_device;
CUDA_CALL(cudaMalloc(&filters_on_device, filter_size*sizeof(float)));
CUDA_CALL(cudaMalloc(&image_grad_on_device, image_grad_size*sizeof(float)));
CUDA_CALL(cudaMalloc(&output_grad_on_device, output_grad_size*sizeof(float)));
CUDA_CALL(cudaMemcpy(
filters_on_device,
filters, filter_size*sizeof(float),
cudaMemcpyHostToDevice
));
CUDA_CALL(cudaMemcpy(
output_grad_on_device,
output_grad, output_grad_size*sizeof(float),
cudaMemcpyHostToDevice
));
//
/// Setup parameters for cudnn call
//
// Setup alpha/beta
float alpha = 1.f, beta = 0.f;
// Setup input and output grad tensor descriptors
cudnnTensorDescriptor_t dy_desc, dx_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(dy_desc,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
n, c_out, h_out, w_out));
CUDNN_CALL(cudnnSetTensor4dDescriptor(dx_desc,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
n, c_in, h_in, w_in));
// Setup filter descriptor
cudnnFilterDescriptor_t w_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(w_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
c_out, c_in, kernel_h, kernel_w));
// Setup convolution meta-data
cudnnConvolutionDescriptor_t conv_desc;
cudnnConvolutionBwdDataAlgo_t conv_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc,
pad_h, pad_w, stride_h, stride_w, 1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
// Setup & allocate workspace
size_t workspace_size = 0;
void *workspace_on_device;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(handle, w_desc,
dy_desc, conv_desc, dx_desc, conv_algo, &workspace_size));
CUDA_CALL(cudaMalloc(&workspace_on_device, workspace_size));
// Run the backward convolution w.r.t. the data
auto t1 = GetTime();
CUDNN_CALL(cudnnConvolutionBackwardData(
handle,
&alpha,
w_desc,
filters_on_device,
dy_desc,
output_grad_on_device,
conv_desc,
conv_algo,
workspace_on_device,
workspace_size,
&beta,
dx_desc,
image_grad_on_device
));
CUDA_CALL(cudaStreamSynchronize(stream));
float total_seconds = ElapsedTime(t1, GetTime());
cout << "CUDNN FPS: " << n / total_seconds << endl;
#ifdef DEBUG
cout << "Device Data Grad:";
PrintTensor(image_grad_on_device, n, c_in, h_in, w_in);
#endif
// Do the host-side backward convolution w.r.t. the data
t1 = GetTime();
ConvolutionBackwardDataHost(
output_grad,
n,
c_out,
h_out,
w_out,
filters,
c_in,
kernel_h,
kernel_w,
stride_h,
stride_w,
image_grad,
h_in,
w_in
);
total_seconds = ElapsedTime(t1, GetTime());
cout << "Host FPS: " << n / total_seconds << endl;
#ifdef DEBUG
cout << "Data Grad: ";
PrintTensor(image_grad, n, c_in, h_in, w_in);
#endif
// Verify the results
VerifyResults(image_grad, image_grad_on_device, n*c_in*h_in*w_in);
// Do the device-side backward convolution w.r.t. the data
t1 = GetTime();
ConvolutionBackwardData(
output_grad_on_device,
n,
c_out,
h_out,
w_out,
filters_on_device,
c_in,
kernel_h,
kernel_w,
stride_h,
stride_w,
image_grad_on_device,
h_in,
w_in,
stream
);
CUDA_CALL(cudaStreamSynchronize(stream));
total_seconds = ElapsedTime(t1, GetTime());
cout << "Device FPS: " << n / total_seconds << endl;
// Verify the results
VerifyResults(image_grad, image_grad_on_device, n*c_in*h_in*w_in);
// clean up
delete[] filters;
delete[] image_grad;
delete[] output_grad;
CUDA_CALL(cudaFree(workspace_on_device));
CUDA_CALL(cudaFree(filters_on_device));
CUDA_CALL(cudaFree(image_grad_on_device));
CUDA_CALL(cudaFree(output_grad_on_device));
CUDNN_CALL(cudnnDestroy(handle));
}
|
76db3cd2b92cca77a8bd0aa7fe0bf0b444b63591.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "stdlib.h"
#include <hip/hip_runtime.h>
#define SIZE 10000
__global__
void dotProdKernel(int *A, int *B, int *C, int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(C, A[i] * B[i]); // atomic accumulation: every thread updates the same scalar, so a plain += would race
}
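// Note: dotProdKernel has no bounds check, so SIZE must be an exact multiple of the
// launch block size (here 10000 / 250 = 40 full blocks).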
void dotProd_CPU(int *A, int *B, int *C, int N){
for (int i = 0; i < N; i++){
*C += A[i] * B[i];
}
}
int main() {
int nBytes = SIZE * sizeof(int);
int *first = (int*) malloc(nBytes);
int *second = (int*) malloc(nBytes);
int *result = (int*) malloc(sizeof(int));
*result = 0;
int block_size, block_no;
block_size = 250; //threads per block
block_no = SIZE/block_size;
//Data filling:
for (int i = 0; i < SIZE; i++){
first[i] = i;
second[i] = i;
}
int *first_gpu;
int *second_gpu;
int *result_gpu;
printf("Allocating device memory on host..\n");
//GPU memory allocation
hipMalloc((void **) &first_gpu, nBytes);
hipMalloc((void **) &second_gpu, nBytes);
hipMalloc((void **) &result_gpu, sizeof(int));
//Work definition////////////////////
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
/////////////////////////////////////
printf("Copying to device..\n");
hipMemcpy(first_gpu, first, nBytes, hipMemcpyHostToDevice);
hipMemcpy(second_gpu, second, nBytes, hipMemcpyHostToDevice);
hipMemcpy(result_gpu, result, sizeof(int), hipMemcpyHostToDevice);
clock_t start_d = clock();
printf("Doing GPU Matrix Multiplication\n");
hipLaunchKernelGGL(( dotProdKernel), dim3(block_no),dim3(block_size), 0, 0, first_gpu, second_gpu, result_gpu, SIZE);
//cudaCheckError();
//Wait for kernel call to finish before stopping the timer
hipDeviceSynchronize();
clock_t end_d = clock();
//Copying data back to host, this is a blocking call and will not start until all kernels are finished
hipMemcpy(result, result_gpu, sizeof(int), hipMemcpyDeviceToHost);
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
printf("Time it took on GPU: %f", time_d);
printf("Doing work on CPU \n");
clock_t start = clock();
*result = 0; // reset the accumulator so the CPU result is not added on top of the GPU result
dotProd_CPU(first,second,result,SIZE);
clock_t end = clock();
double time = (double)(end-start)/CLOCKS_PER_SEC;
printf("Time it took on CPU: %f", time);
//Free GPU memory
hipFree(first_gpu);
hipFree(second_gpu);
hipFree(result_gpu);
return 0;
}
| 76db3cd2b92cca77a8bd0aa7fe0bf0b444b63591.cu | #include "stdio.h"
#include "stdlib.h"
#include <cuda.h>
#define SIZE 10000
__global__
void dotProdKernel(int *A, int *B, int *C, int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(C, A[i] * B[i]); // atomic accumulation: every thread updates the same scalar, so a plain += would race
}
void dotProd_CPU(int *A, int *B, int *C, int N){
for (int i = 0; i < N; i++){
*C += A[i] * B[i];
}
}
int main() {
int nBytes = SIZE * sizeof(int);
int *first = (int*) malloc(nBytes);
int *second = (int*) malloc(nBytes);
int *result = (int*) malloc(sizeof(int));
*result = 0;
int block_size, block_no;
block_size = 250; //threads per block
block_no = SIZE/block_size;
//Data filling:
for (int i = 0; i < SIZE; i++){
first[i] = i;
second[i] = i;
}
int *first_gpu;
int *second_gpu;
int *result_gpu;
printf("Allocating device memory on host..\n");
//GPU memory allocation
cudaMalloc((void **) &first_gpu, nBytes);
cudaMalloc((void **) &second_gpu, nBytes);
cudaMalloc((void **) &result_gpu, sizeof(int));
//Work definition////////////////////
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
/////////////////////////////////////
printf("Copying to device..\n");
cudaMemcpy(first_gpu, first, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(second_gpu, second, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(result_gpu, result, sizeof(int), cudaMemcpyHostToDevice);
clock_t start_d = clock();
printf("Doing GPU Matrix Multiplication\n");
dotProdKernel<<<block_no,block_size>>>(first_gpu, second_gpu, result_gpu, SIZE);
//cudaCheckError();
//Wait for kernel call to finish before stopping the timer
cudaThreadSynchronize();
clock_t end_d = clock();
//Copying data back to host, this is a blocking call and will not start until all kernels are finished
cudaMemcpy(result, result_gpu, sizeof(int), cudaMemcpyDeviceToHost);
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
printf("Time it took on GPU: %f", time_d);
printf("Doing work on CPU \n");
clock_t start = clock();
*result = 0; // reset the accumulator so the CPU result is not added on top of the GPU result
dotProd_CPU(first,second,result,SIZE);
clock_t end = clock();
double time = (double)(end-start)/CLOCKS_PER_SEC;
printf("Time it took on CPU: %f", time);
//Free GPU memory
cudaFree(first_gpu);
cudaFree(second_gpu);
cudaFree(result_gpu);
return 0;
}
|
6a7274bdcaf250bcf12331c77d0e3f29cb21869b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* ==========================================================================
textureCube.cu
==========================================================================
Main wrapper + kernel that changes the colors of the four faces
*/
#define PI 3.1415926536f
// --------------------------------------------------------------------------
// Kernel
// --------------------------------------------------------------------------
// Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
// between 6 different colors to use. We will use a different color on each face of a
// cube map.
// --------------------------------------------------------------------------
// Wrapper
// --------------------------------------------------------------------------
// Sets up grid / blocks, launches kernel
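// Pattern math used below: (theta_x, theta_y) maps the pixel into [-1,1]^2,
// theta = 2*pi*radius gives concentric rings, and value = 255*(0.6 + 0.4*cos(theta + t))
// animates them over time t.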
extern "C"
__global__ void CudaKernelTextureCubeStrobelight(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4 * x;
// populate it
float theta_x = (2.0f*x) / width - 1.0f;
float theta_y = (2.0f*y) / height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255 * (0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
} | 6a7274bdcaf250bcf12331c77d0e3f29cb21869b.cu | #include "includes.h"
/* ==========================================================================
textureCube.cu
==========================================================================
Main wrapper + kernel that changes the colors of the four faces
*/
#define PI 3.1415926536f
// --------------------------------------------------------------------------
// Kernel
// --------------------------------------------------------------------------
// Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
// between 6 different colors to use. We will use a different color on each face of a
// cube map.
// --------------------------------------------------------------------------
// Wrapper
// --------------------------------------------------------------------------
// Sets up grid / blocks, launches kernel
extern "C"
__global__ void CudaKernelTextureCubeStrobelight(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4 * x;
// populate it
float theta_x = (2.0f*x) / width - 1.0f;
float theta_y = (2.0f*y) / height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255 * (0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
} |
f13027ea7703e0352c410a0aa583615e82216202.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <errno.h>
#include <stdint.h>
#include <hip/hip_runtime_api.h>
#define DIM 64
#define CEILING(x,y) (((x) + (y) - 1) / (y))
#define DIMVEC CEILING(DIM,4)
#define SHARED_MEMORY_DIM ((1<<15)+(1<<14)) // 48KB
#define N_THREAD_GPU (1<<10) // limit is 1024
#define MAX_STEPS (1<<20) /* run for no more than 1Mi steps */
#define TARGET_FITNESS (FLT_EPSILON) /* or until the fitness is less than this much */
#define STEP_CHECK_FREQ 1 /* after how many steps to write the system and check the time */
/* needed for find fitness min in parallel */
typedef struct fitness_pos
{
int pos;
float fitness;
} fitness_pos;
/* The whole particle system */
__device__ float4 *current_best_pos;
__device__ float4 *global_best_pos;
__constant__ int num_particles;
__device__ float global_fitness = HUGE_VALF;
__device__ float current_fitness;
/* Extents of the domain in each dimension */
#define coord_min -1
#define coord_max 1
#define coord_range 2
float4 target_pos[DIMVEC]; /* The target position */
__constant__ float4 target_pos_shared[DIMVEC];
float fitness_min;
/* Particle components*/
__device__ float4* pos;
__device__ float4* vel;
__device__ float4* best_pos;
__device__ uint64_t* prng_state;
__device__ float* best_fit;
__device__ float* fitness_val;
/* Overall weight for the old velocity, best position distance and global
* best position distance in the computation of the new velocity
*/
#define vel_omega 0.9
#define vel_phi_best 2
#define vel_phi_global 2
/* The contribution of the velocity to the new position. Set to 1
* to use the standard PSO approach of adding the whole velocity
* to the position.
*/
#define step_factor 1
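/* For reference, new_vel_pos() applies the standard PSO update per component:
 *   v' = vel_omega*v + vel_phi_best*r1*(best_pos - x) + vel_phi_global*r2*(global_best_pos - x)
 *   x' = x + step_factor*v'
 * with v' clamped to [-coord_range, coord_range], x' clamped to [coord_min, coord_max],
 * and r1, r2 uniform random numbers in [0, 1].
 */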
__device__ __host__ uint32_t MWC64X(uint64_t *state);
__device__ __host__ float range_rand(float min, float max, uint64_t *prng_state);
__device__ __host__ void init_rand(uint64_t *prng_state, int i);
__device__ float fitness(float4 *pos);
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM);
__device__ void warp_control_float(float* smpos, int particleIndexSHM);
__global__ void init_particle();
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks);
__global__ void new_vel_pos();
__global__ void calc_fitness();
int ceil_log2(unsigned long long x);
void check_error(hipError_t err, const char *msg);
void start_time_record(hipEvent_t *before, hipEvent_t *after);
void stop_time_record(hipEvent_t *before, hipEvent_t *after, float *runtime);
void parallel_fitness(const int n_particle, int n_thread);
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles);
void init_mem(int n_particle){
float *pos_d, *vel_d, *best_pos_d, *best_fit_d,*fitness_val_d, *current_best_pos_d, *global_best_pos_d;
uint64_t *prng_state_d;
check_error(hipMalloc(&pos_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle pos");
check_error(hipMalloc(&vel_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle vel");
check_error(hipMalloc(&best_pos_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle best_pos");
check_error(hipMalloc((uint64_t **)&prng_state_d,sizeof(uint64_t) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(&best_fit_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(&fitness_val_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(¤t_best_pos_d,sizeof(float4) * DIMVEC),"memory alloc n particle best_pos");
check_error(hipMalloc(&global_best_pos_d,sizeof(float4) * DIMVEC),"memory alloc n particle best_pos");
check_error(hipMemcpyToSymbol(target_pos_shared, &target_pos, sizeof(float)*DIM),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(num_particles, &n_particle, sizeof(int)),"memory cpy to device num_particle");
check_error(hipMemcpyToSymbol(prng_state, &prng_state_d, sizeof(uint64_t)),"memory cpy to device num_particle");
check_error(hipMemcpyToSymbol(pos, &pos_d, sizeof(pos_d)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(vel, &vel_d, sizeof(vel)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(best_pos, &best_pos_d, sizeof(best_pos)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(best_fit, &best_fit_d, sizeof(best_fit)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(fitness_val, &fitness_val_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(current_best_pos, ¤t_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(global_best_pos, &global_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
}
int main(int argc, char *argv[])
{
unsigned step = 0;
unsigned n_particle;
hipEvent_t before, after;
float calc_fitness_time = 0, new_vel_pos_time = 0, fitness_min_time = 0;
int j;
int n_blocks;
int n_thread = N_THREAD_GPU;
uint64_t prng_state_h;
dim3 init_parall(N_THREAD_GPU/DIM,DIM,1);
/* Get particle's numbers, default 128 */
n_particle = argc > 1 ? atoi(argv[1]) : 128;
/* Define n blocks for GPU parallelization */
n_blocks = ceil((float)n_particle / N_THREAD_GPU) == 0 ? 1 : ceil((float)n_particle / N_THREAD_GPU);
/* Initialize the target position */
init_rand(&prng_state_h, time(NULL));
printf("target position: (");
for(j = 0; j < DIMVEC; j++){
target_pos[j].x = -0.287776;//range_rand(coord_min, coord_max, &prng_state_h);
target_pos[j].y = 0.520416;//range_rand(coord_min, coord_max, &prng_state_h);
target_pos[j].z = DIM > 2 ? range_rand(coord_min, coord_max, &prng_state_h) : HUGE_VALF;
target_pos[j].w = DIM > 3 ? range_rand(coord_min, coord_max, &prng_state_h) : HUGE_VALF;
printf("%f,%f,%f,%f,", target_pos[j].x,target_pos[j].y,target_pos[j].z,target_pos[j].w);
}
printf(")\n");
/* Initialize a system with the number of particles given
* on the command-line or from default value (128) */
init_mem(n_particle);
/* init particle system and calculate initial fitness */
hipLaunchKernelGGL(( init_particle), dim3(n_blocks), dim3(n_thread), 0, 0, );
parallel_fitness(n_particle, n_thread);
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
while (step < MAX_STEPS) {
++step;
int n_thread_pos = SHARED_MEMORY_DIM/(sizeof(float2)*DIM) < N_THREAD_GPU ?
SHARED_MEMORY_DIM/(sizeof(float2)*DIM) : N_THREAD_GPU;
int n_blocks_pos_calc_fit = ceil((float)n_particle / (n_thread_pos/DIMVEC)) == 0 ? 1 : ceil((float)n_particle / (n_thread_pos/DIMVEC));
int n_blocks_pos_vel = ceil((float)n_particle / (N_THREAD_GPU/DIMVEC)) == 0 ? 1 : ceil((float)n_particle / (N_THREAD_GPU/DIMVEC));
/* Compute the new velocity for each particle */
/* Update the position of each particle, and the global fitness */
dim3 n_t(DIMVEC,N_THREAD_GPU/DIMVEC);
start_time_record(&before,&after);
hipLaunchKernelGGL(( new_vel_pos), dim3(n_blocks_pos_vel), dim3(n_t), 0, 0, );
stop_time_record(&before,&after,&new_vel_pos_time);
/* Calculate new fitness for each particle*/
dim3 n_t_calc_fit(DIMVEC,n_thread_pos/DIMVEC);
start_time_record(&before,&after);
hipLaunchKernelGGL(( calc_fitness), dim3(n_blocks_pos_calc_fit), dim3(n_t_calc_fit), sizeof(float2)*n_thread_pos, 0, );
stop_time_record(&before,&after,&calc_fitness_time);
/* Calculate min fitness */
start_time_record(&before,&after);
parallel_fitness(n_particle, n_thread);
stop_time_record(&before,&after,&fitness_min_time);
if (fitness_min < TARGET_FITNESS)
break;
if (step % STEP_CHECK_FREQ == 0) {
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
}
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles)
{
float current_fitness_d;
float global_fitness_d;
float *current_best_pos_d_addr = (float*)malloc(sizeof(float));
float *global_best_pos_addr = (float*)malloc(sizeof(float));
float *current_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *global_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *current_fitness_d_addr = (float*)malloc(sizeof(float));
float *global_fitness_d_addr = (float*)malloc(sizeof(float));
int j;
hipGetSymbolAddress((void **)¤t_fitness_d_addr, current_fitness);
hipGetSymbolAddress((void **)&global_fitness_d_addr, global_fitness);
hipGetSymbolAddress((void **)¤t_best_pos_d_addr, current_best_pos);
hipGetSymbolAddress((void **)&global_best_pos_addr, global_best_pos);
check_error(hipMemcpy(¤t_fitness_d, current_fitness_d_addr, sizeof(float),hipMemcpyDeviceToHost),"refresh current_fitness_d host");
check_error(hipMemcpy(&global_fitness_d, global_fitness_d_addr, sizeof(float),hipMemcpyDeviceToHost),"refresh global_fitness_d host");
printf("step %u, best fitness: current %g, so far %g\n", step,
current_fitness_d, global_fitness_d);
if (step > 0) {
printf("time - calc_fitness_time: %fms new_vel_pos: %fms fitness_min: %f\n",calc_fitness_time,new_vel_pos,fitness_min);
}
printf("\ttarget ");
for(j = 0; j < DIMVEC; j++){
printf("%f,%f,%f,%f,", target_pos[j].x,target_pos[j].y,target_pos[j].z,target_pos[j].w);
}
printf("\n");
}
/* Target function to be minimized: this is the square
* Euclidean distance from target_pos, perturbed by the distance
* to the origin: this puts a local minimum at the origin,
* which is good to test if the method actually finds the global
* minimum or not */
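/* In closed form: fitness(x) = ||x - target||^2 * (100*||x||^2 + 1) / 10,
 * so the global minimum sits at target_pos while the ||x||^2 factor creates the
 * local minimum near the origin mentioned above. */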
__device__ float fitness(float4 *pos)
{
int i;
float fit1 = 0,fit2 = 0, dim_val;
for(i = 0; i < DIMVEC; i++){
dim_val = pos[i].x;
fit1 += pow(dim_val - target_pos_shared[i].x,2);
fit2 += pow(dim_val,2);
if(pos[i].y != HUGE_VALF){
dim_val = pos[i].y;
fit1 += pow(dim_val - target_pos_shared[i].y,2);
fit2 += pow(dim_val,2);
}
if(pos[i].z != HUGE_VALF){
dim_val = pos[i].z;
fit1 += pow(dim_val - target_pos_shared[i].z,2);
fit2 += pow(dim_val,2);
}
if(pos[i].w != HUGE_VALF){
dim_val = pos[i].w;
fit1 += pow(dim_val - target_pos_shared[i].w,2);
fit2 += pow(dim_val,2);
}
}
return fit1*(100*fit2+1)/10;
}
/* A function that generates a random float in the given range */
float range_rand(float min, float max, uint64_t *prng_state)
{
uint32_t r = MWC64X(prng_state);
return min + r*((max - min)/UINT32_MAX);
}
/* Random number generation: we use the MWC64X PRNG from
* http://cas.ee.ic.ac.uk/people/dt10/research/rngs-gpu-mwc64x.html
* which is parallel-friendly (but needs us to keep track of the state)
*/
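/* The 64-bit state packs the multiply-with-carry pair: x in the low 32 bits and
 * the carry c in the high 32 bits; each call returns x ^ c and advances the pair. */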
uint32_t MWC64X(uint64_t *state)
{
uint64_t x = *state;
uint32_t c = x >> 32; // the upper 32 bits
x &= UINT32_MAX; // keep only the lower bits
*state = x*4294883355U + c;
return ((uint32_t)x)^c;
}
/* A function to initialize the PRNG */
__device__ __host__ void init_rand(uint64_t *prng_state, int i)
{
*prng_state = i;
}
/* Function to initialize a single particle at index i. */
__global__ void init_particle()
{
const int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * (DIMVEC);
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l;
int index;
init_rand(&prng_state_l, particleIndex);
int j;
for (j = 0; j < DIMVEC; j++){
index = (particleIndexDIM + j);
best_pos[index].x = pos[index].x = range_rand(coord_min, coord_max, &prng_state_l);
best_pos[index].y = pos[index].y = range_rand(coord_min, coord_max, &prng_state_l);
best_pos[index].z = pos[index].z = DIM > 2 ? range_rand(coord_min, coord_max, &prng_state_l) : HUGE_VALF;
best_pos[index].w = pos[index].w = DIM > 3 ? range_rand(coord_min, coord_max, &prng_state_l) : HUGE_VALF;
}
for (j = 0; j < DIMVEC; j++){
index = (particleIndexDIM + j);
vel[particleIndexDIM + j].x = range_rand(-coord_range, coord_range, &prng_state_l);
vel[particleIndexDIM + j].y = range_rand(-coord_range, coord_range, &prng_state_l);
vel[particleIndexDIM + j].z = DIM > 2 ? range_rand(-coord_range, coord_range, &prng_state_l) : HUGE_VALF;
vel[particleIndexDIM + j].w = DIM > 3 ? range_rand(-coord_range, coord_range, &prng_state_l) : HUGE_VALF;
}
best_fit[particleIndex] = fitness_val[particleIndex] = fitness(pos + particleIndexDIM);
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new position and new velocity of a given particle */
__global__ void new_vel_pos()
{
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexDIM = particleIndex * (DIMVEC) + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles || particleIndexDIM >= num_particles * DIMVEC)
return;
uint64_t prng_state_l = prng_state[particleIndex];
float velLocal, posLocal, pbest, gbest;
float best_vec_rand_coeff, global_vec_rand_coeff;
float4 velLocalV = vel[particleIndexDIM];
float4 posLocalV = pos[particleIndexDIM];
float4 best_pos_v = best_pos[particleIndexDIM];
float4 global_best_pos_v = global_best_pos[indexDIM];
// calc x
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.x*vel_omega;
posLocal = posLocalV.x;
pbest = (best_pos_v.x - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.x - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.x = posLocal;
velLocalV.x = velLocal;
// calc y
velLocal = velLocalV.y*vel_omega;
posLocal = posLocalV.y;
pbest = (best_pos_v.y - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.y - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.y = posLocal;
velLocalV.y = velLocal;
if(DIM > 2){
// calc z
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.z*vel_omega;
posLocal = posLocalV.z;
pbest = (best_pos_v.z - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.z - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.z = posLocal;
velLocalV.z = velLocal;
}else{
posLocalV.z = HUGE_VALF;
velLocalV.z = HUGE_VALF;
}
if(DIM > 3){
//calc w
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.w*vel_omega;
posLocal = posLocalV.w;
pbest = (best_pos_v.w - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.w - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.w = posLocal;
velLocalV.w = velLocal;
}else{
posLocalV.w = HUGE_VALF;
velLocalV.w = HUGE_VALF;
}
pos[particleIndexDIM] = posLocalV;
vel[particleIndexDIM] = velLocalV;
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new fitness val of a given particle */
__global__ void calc_fitness()
{
extern __shared__ float2 smpos[];
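// Per-lane partial sums: smpos[tid].x holds this lane's squared distance to the
// target and smpos[tid].y its squared norm; warp_control_float2() then sums the
// DIMVEC lanes of each particle before the two terms are combined into the fitness.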
int particleIndexSHM = threadIdx.y * blockDim.x + threadIdx.x;
int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
int particleIndexDIM = particleIndex * (DIMVEC) + threadIdx.x;
int indexDIM = threadIdx.x;
if(particleIndex >= num_particles || particleIndexDIM >= num_particles * DIMVEC)
return;
float4 posLocalV = pos[particleIndexDIM];
float4 targetLocal = target_pos_shared[indexDIM];
smpos[particleIndexSHM].x =
(posLocalV.x - targetLocal.x)*(posLocalV.x - targetLocal.x)+
(posLocalV.y - targetLocal.y)*(posLocalV.y - targetLocal.y);
smpos[particleIndexSHM].x += posLocalV.z != HUGE_VALF ? (posLocalV.z - targetLocal.z)*(posLocalV.z - targetLocal.z) : 0;
smpos[particleIndexSHM].x += posLocalV.w != HUGE_VALF ? (posLocalV.w - targetLocal.w)*(posLocalV.w - targetLocal.w) : 0;
smpos[particleIndexSHM].y = (posLocalV.x*posLocalV.x)+
(posLocalV.y*posLocalV.y);
smpos[particleIndexSHM].y += posLocalV.z != HUGE_VALF ? (posLocalV.z*posLocalV.z) : 0;
smpos[particleIndexSHM].y += posLocalV.w != HUGE_VALF ? (posLocalV.w*posLocalV.w) : 0;
warp_control_float2(smpos,particleIndexSHM, indexDIM);
if (indexDIM==0){
fitness_val[particleIndex] = smpos[particleIndexSHM].x*(100*smpos[particleIndexSHM].y+1)/10;
}
__syncthreads();
if (fitness_val[particleIndex] < best_fit[particleIndex]) {
best_fit[particleIndex] = fitness_val[particleIndex];
memcpy(best_pos + particleIndexDIM,pos + particleIndexDIM,sizeof(float4));
}
}
/* Host driver for the find_min_fitness_parallel kernel; it splits the reduction into passes so the shared memory never overflows */
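/* Each pass reduces the current candidate list from `blocks` entries down to
 * ceil(blocks / n_thread) block minima; the loop repeats until a single
 * fitness_pos remains, which is then copied back to the host. Note that the
 * `&& 0` in the branch below keeps the chunked multi-launch path disabled. */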
void parallel_fitness(const int n_particle, int n_thread){
int shmdim;
fitness_pos *fitness_device_out,*fitness_device_in = NULL;
int last_n_block;
int offset;
int blocks = n_particle;
int max_parallel_particle_iteration = SHARED_MEMORY_DIM / sizeof(fitness_pos);
int iteration;
int max_blocks_per_iteration = max_parallel_particle_iteration / n_thread;
while(blocks != 1){
offset = 0;
last_n_block = blocks;
blocks = ceil((float)blocks / n_thread);
if(blocks == 1){
n_thread = ceil_log2(last_n_block);
}
hipMalloc(&fitness_device_out, sizeof(fitness_pos) * blocks);
shmdim = n_thread*sizeof(fitness_pos);
if(max_parallel_particle_iteration < last_n_block && 0){
iteration = 0;
while(iteration + max_parallel_particle_iteration < blocks*n_thread){
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(max_blocks_per_iteration), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
iteration += max_parallel_particle_iteration;
offset += (max_parallel_particle_iteration/n_thread);
}
int x = (blocks*n_thread) - (offset*n_thread);
x = ceil((float)x / n_thread);
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(x), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
}else{
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(blocks), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block,blocks);
}
if(fitness_device_in != NULL){
check_error(hipFree(fitness_device_in),"free fitness_device_in");
}
fitness_device_in = fitness_device_out;
}
fitness_device_out = (fitness_pos*)malloc(sizeof(fitness_pos));
check_error(hipMemcpy(fitness_device_out, fitness_device_in, sizeof(fitness_pos),hipMemcpyDeviceToHost),"copy fitness_min");
fitness_min = fitness_device_out->fitness;
check_error(hipFree(fitness_device_in),"free fitness_device_out");
free(fitness_device_out);
}
/* Kernel function to compute the new global min fitness */
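/* Shared-memory tree reduction: each thread loads one candidate (from `in` on later
 * passes, or directly from fitness_val on the first pass), pairs are compared with a
 * halving stride, and thread 0 writes the block minimum to `out`. */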
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks){
extern __shared__ fitness_pos sm[];
const int tid=threadIdx.x;
const int i=(blockIdx.x*blockDim.x+threadIdx.x) + (offset*blockDim.x);
int stride;
sm[tid].fitness = HUGE_VALF;
if(i >= num_particles || i >= n_in)
return;
if(in != NULL){
sm[tid] = in[i];
}else{
sm[tid].fitness = fitness_val[i];
sm[tid].pos = i;
}
//tree-reduce in shared memory to find the block minimum
for (stride = blockDim.x/2;stride>0;stride>>=1)
{
__syncthreads();
if (tid<stride && sm[tid].fitness > sm[tid+stride].fitness){
sm[tid] = sm[tid+stride];
}
}
if (tid==0){
out[blockIdx.x+offset]=sm[0];//copy back
if(blocks == 1){
current_fitness = sm[0].fitness;
memcpy(current_best_pos,pos+sm[0].pos*DIMVEC,sizeof(float4)*DIMVEC);
if (sm[0].fitness < global_fitness) {
global_fitness = sm[0].fitness;
memcpy(global_best_pos,current_best_pos,sizeof(float4)*DIMVEC);
}
}
}
}
void start_time_record(hipEvent_t *before, hipEvent_t *after){
check_error(hipEventCreate(&(*before)),"create cudaEvent before");
check_error(hipEventCreate(&(*after)),"create cudaEvent after");
check_error(hipEventRecord(*before),"record cudaEvent before");
}
void stop_time_record(hipEvent_t *before, hipEvent_t *after, float *runtime){
check_error(hipEventRecord(*after),"record cudaEvent after");
check_error(hipEventSynchronize(*after),"synch cudaEvent before");
check_error(hipEventElapsedTime(runtime, *before, *after),"calc cudaEvent elapsed time");
}
void check_error(hipError_t err, const char *msg)
{
if (err != hipSuccess) {
fprintf(stderr, "%s : errore %d (%s)\n",
msg, err, hipGetErrorString(err));
exit(err);
}
}
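/* Sums the per-lane partial fitness terms of one particle across the blockDim.x
 * (DIMVEC) lanes; after the reduction, lane 0 of each particle row holds the totals. */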
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM)
{
__syncthreads();
#if DIM > 1
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM].x += smpos[particleIndexSHM+stride].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM+stride].y;
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 32
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 16].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 16].y;
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 8].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 8].y;
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 4].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 4].y;
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 2].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 2].y;
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 1].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 1].y;
__syncthreads();
#endif
}
#endif
}
/* function to find the ceil of a log2 x value*/
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return 1<<y;
}
| f13027ea7703e0352c410a0aa583615e82216202.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <errno.h>
#include <stdint.h>
#include <cuda_runtime_api.h>
#define DIM 64
#define CEILING(x,y) (((x) + (y) - 1) / (y))
#define DIMVEC CEILING(DIM,4)
#define SHARED_MEMORY_DIM ((1<<15)+(1<<14)) // 48KB
#define N_THREAD_GPU (1<<10) // limit is 1024
#define MAX_STEPS (1<<20) /* run for no more than 1Mi steps */
#define TARGET_FITNESS (FLT_EPSILON) /* or until the fitness is less than this much */
#define STEP_CHECK_FREQ 1 /* after how many steps to write the system and check the time */
/* needed for find fitness min in parallel */
typedef struct fitness_pos
{
int pos;
float fitness;
} fitness_pos;
/* The whole particle system */
__device__ float4 *current_best_pos;
__device__ float4 *global_best_pos;
__constant__ int num_particles;
__device__ float global_fitness = HUGE_VALF;
__device__ float current_fitness;
/* Extents of the domain in each dimension */
#define coord_min -1
#define coord_max 1
#define coord_range 2
float4 target_pos[DIMVEC]; /* The target position */
__constant__ float4 target_pos_shared[DIMVEC];
float fitness_min;
/* Particle components*/
__device__ float4* pos;
__device__ float4* vel;
__device__ float4* best_pos;
__device__ uint64_t* prng_state;
__device__ float* best_fit;
__device__ float* fitness_val;
/* Overall weight for the old velocity, best position distance and global
* best position distance in the computation of the new velocity
*/
#define vel_omega 0.9
#define vel_phi_best 2
#define vel_phi_global 2
/* The contribution of the velocity to the new position. Set to 1
* to use the standard PSO approach of adding the whole velocity
* to the position.
*/
#define step_factor 1
__device__ __host__ uint32_t MWC64X(uint64_t *state);
__device__ __host__ float range_rand(float min, float max, uint64_t *prng_state);
__device__ __host__ void init_rand(uint64_t *prng_state, int i);
__device__ float fitness(float4 *pos);
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM);
__device__ void warp_control_float(float* smpos, int particleIndexSHM);
__global__ void init_particle();
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks);
__global__ void new_vel_pos();
__global__ void calc_fitness();
int ceil_log2(unsigned long long x);
void check_error(cudaError_t err, const char *msg);
void start_time_record(cudaEvent_t *before, cudaEvent_t *after);
void stop_time_record(cudaEvent_t *before, cudaEvent_t *after, float *runtime);
void parallel_fitness(const int n_particle, int n_thread);
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles);
void init_mem(int n_particle){
float *pos_d, *vel_d, *best_pos_d, *best_fit_d,*fitness_val_d, *current_best_pos_d, *global_best_pos_d;
uint64_t *prng_state_d;
check_error(cudaMalloc(&pos_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle pos");
check_error(cudaMalloc(&vel_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle vel");
check_error(cudaMalloc(&best_pos_d,sizeof(float4) * n_particle * DIMVEC),"memory alloc n particle best_pos");
check_error(cudaMalloc((uint64_t **)&prng_state_d,sizeof(uint64_t) * n_particle),"memory alloc n particle best_pos");
check_error(cudaMalloc(&best_fit_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(cudaMalloc(&fitness_val_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(cudaMalloc(¤t_best_pos_d,sizeof(float4) * DIMVEC),"memory alloc n particle best_pos");
check_error(cudaMalloc(&global_best_pos_d,sizeof(float4) * DIMVEC),"memory alloc n particle best_pos");
check_error(cudaMemcpyToSymbol(target_pos_shared, &target_pos, sizeof(float)*DIM),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(num_particles, &n_particle, sizeof(int)),"memory cpy to device num_particle");
check_error(cudaMemcpyToSymbol(prng_state, &prng_state_d, sizeof(uint64_t)),"memory cpy to device num_particle");
check_error(cudaMemcpyToSymbol(pos, &pos_d, sizeof(pos_d)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(vel, &vel_d, sizeof(vel)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(best_pos, &best_pos_d, sizeof(best_pos)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(best_fit, &best_fit_d, sizeof(best_fit)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(fitness_val, &fitness_val_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(current_best_pos, ¤t_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(global_best_pos, &global_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
}
int main(int argc, char *argv[])
{
unsigned step = 0;
unsigned n_particle;
cudaEvent_t before, after;
float calc_fitness_time = 0, new_vel_pos_time = 0, fitness_min_time = 0;
int j;
int n_blocks;
int n_thread = N_THREAD_GPU;
uint64_t prng_state_h;
dim3 init_parall(N_THREAD_GPU/DIM,DIM,1);
/* Get particle's numbers, default 128 */
n_particle = argc > 1 ? atoi(argv[1]) : 128;
/* Define n blocks for GPU parallelization */
n_blocks = ceil((float)n_particle / N_THREAD_GPU) == 0 ? 1 : ceil((float)n_particle / N_THREAD_GPU);
/* Initialize the target position */
init_rand(&prng_state_h, time(NULL));
printf("target position: (");
for(j = 0; j < DIMVEC; j++){
target_pos[j].x = -0.287776;//range_rand(coord_min, coord_max, &prng_state_h);
target_pos[j].y = 0.520416;//range_rand(coord_min, coord_max, &prng_state_h);
target_pos[j].z = DIM > 2 ? range_rand(coord_min, coord_max, &prng_state_h) : HUGE_VALF;
target_pos[j].w = DIM > 3 ? range_rand(coord_min, coord_max, &prng_state_h) : HUGE_VALF;
printf("%f,%f,%f,%f,", target_pos[j].x,target_pos[j].y,target_pos[j].z,target_pos[j].w);
}
printf(")\n");
/* Initialize a system with the number of particles given
* on the command-line or from default value (128) */
init_mem(n_particle);
/* init particle system and calculate initial fitness */
init_particle<<<n_blocks, n_thread>>>();
parallel_fitness(n_particle, n_thread);
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
while (step < MAX_STEPS) {
++step;
int n_thread_pos = SHARED_MEMORY_DIM/(sizeof(float2)*DIM) < N_THREAD_GPU ?
SHARED_MEMORY_DIM/(sizeof(float2)*DIM) : N_THREAD_GPU;
int n_blocks_pos_calc_fit = ceil((float)n_particle / (n_thread_pos/DIMVEC)) == 0 ? 1 : ceil((float)n_particle / (n_thread_pos/DIMVEC));
int n_blocks_pos_vel = ceil((float)n_particle / (N_THREAD_GPU/DIMVEC)) == 0 ? 1 : ceil((float)n_particle / (N_THREAD_GPU/DIMVEC));
/* Compute the new velocity for each particle */
/* Update the position of each particle, and the global fitness */
dim3 n_t(DIMVEC,N_THREAD_GPU/DIMVEC);
start_time_record(&before,&after);
new_vel_pos<<<n_blocks_pos_vel, n_t>>>();
stop_time_record(&before,&after,&new_vel_pos_time);
/* Calculate new fitness for each particle*/
dim3 n_t_calc_fit(DIMVEC,n_thread_pos/DIMVEC);
start_time_record(&before,&after);
calc_fitness<<<n_blocks_pos_calc_fit, n_t_calc_fit, sizeof(float2)*n_thread_pos>>>();
stop_time_record(&before,&after,&calc_fitness_time);
/* Calculate min fitness */
start_time_record(&before,&after);
parallel_fitness(n_particle, n_thread);
stop_time_record(&before,&after,&fitness_min_time);
if (fitness_min < TARGET_FITNESS)
break;
if (step % STEP_CHECK_FREQ == 0) {
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
}
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles)
{
float current_fitness_d;
float global_fitness_d;
float *current_best_pos_d_addr = (float*)malloc(sizeof(float));
float *global_best_pos_addr = (float*)malloc(sizeof(float));
float *current_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *global_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *current_fitness_d_addr = (float*)malloc(sizeof(float));
float *global_fitness_d_addr = (float*)malloc(sizeof(float));
int j;
cudaGetSymbolAddress((void **)¤t_fitness_d_addr, current_fitness);
cudaGetSymbolAddress((void **)&global_fitness_d_addr, global_fitness);
cudaGetSymbolAddress((void **)¤t_best_pos_d_addr, current_best_pos);
cudaGetSymbolAddress((void **)&global_best_pos_addr, global_best_pos);
check_error(cudaMemcpy(¤t_fitness_d, current_fitness_d_addr, sizeof(float),cudaMemcpyDeviceToHost),"refresh current_fitness_d host");
check_error(cudaMemcpy(&global_fitness_d, global_fitness_d_addr, sizeof(float),cudaMemcpyDeviceToHost),"refresh global_fitness_d host");
printf("step %u, best fitness: current %g, so far %g\n", step,
current_fitness_d, global_fitness_d);
if (step > 0) {
printf("time - calc_fitness_time: %fms new_vel_pos: %fms fitness_min: %f\n",calc_fitness_time,new_vel_pos,fitness_min);
}
printf("\ttarget ");
for(j = 0; j < DIMVEC; j++){
printf("%f,%f,%f,%f,", target_pos[j].x,target_pos[j].y,target_pos[j].z,target_pos[j].w);
}
printf("\n");
}
/* Target function to be minimized: this is the square
* Euclidean distance from target_pos, “perturbed” by the distance
* to the origin: this puts a local minimum at the origin,
* which is good to test if the method actually finds the global
* minimum or not */
__device__ float fitness(float4 *pos)
{
int i;
float fit1 = 0,fit2 = 0, dim_val;
for(i = 0; i < DIMVEC; i++){
dim_val = pos[i].x;
fit1 += pow(dim_val - target_pos_shared[i].x,2);
fit2 += pow(dim_val,2);
if(pos[i].y != HUGE_VALF){
dim_val = pos[i].y;
fit1 += pow(dim_val - target_pos_shared[i].y,2);
fit2 += pow(dim_val,2);
}
if(pos[i].z != HUGE_VALF){
dim_val = pos[i].z;
fit1 += pow(dim_val - target_pos_shared[i].z,2);
fit2 += pow(dim_val,2);
}
if(pos[i].w != HUGE_VALF){
dim_val = pos[i].w;
fit1 += pow(dim_val - target_pos_shared[i].w,2);
fit2 += pow(dim_val,2);
}
}
return fit1*(100*fit2+1)/10;
}
/* A function that generates a random float in the given range */
float range_rand(float min, float max, uint64_t *prng_state)
{
uint32_t r = MWC64X(prng_state);
return min + r*((max - min)/UINT32_MAX);
}
/* Random number generation: we use the MWC64X PRNG from
* http://cas.ee.ic.ac.uk/people/dt10/research/rngs-gpu-mwc64x.html
* which is parallel-friendly (but needs us to keep track of the state)
*/
uint32_t MWC64X(uint64_t *state)
{
uint64_t x = *state;
uint32_t c = x >> 32; // the upper 32 bits
x &= UINT32_MAX; // keep only the lower bits
*state = x*4294883355U + c;
return ((uint32_t)x)^c;
}
/* A function to initialize the PRNG */
__device__ __host__ void init_rand(uint64_t *prng_state, int i)
{
*prng_state = i;
}
/* Function to initialize a single particle at index i. */
__global__ void init_particle()
{
const int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * (DIMVEC);
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l;
int index;
init_rand(&prng_state_l, particleIndex);
int j;
for (j = 0; j < DIMVEC; j++){
index = (particleIndexDIM + j);
best_pos[index].x = pos[index].x = range_rand(coord_min, coord_max, &prng_state_l);
best_pos[index].y = pos[index].y = range_rand(coord_min, coord_max, &prng_state_l);
best_pos[index].z = pos[index].z = DIM > 2 ? range_rand(coord_min, coord_max, &prng_state_l) : HUGE_VALF;
best_pos[index].w = pos[index].w = DIM > 3 ? range_rand(coord_min, coord_max, &prng_state_l) : HUGE_VALF;
}
for (j = 0; j < DIMVEC; j++){
index = (particleIndexDIM + j);
vel[particleIndexDIM + j].x = range_rand(-coord_range, coord_range, &prng_state_l);
vel[particleIndexDIM + j].y = range_rand(-coord_range, coord_range, &prng_state_l);
vel[particleIndexDIM + j].z = DIM > 2 ? range_rand(-coord_range, coord_range, &prng_state_l) : HUGE_VALF;
vel[particleIndexDIM + j].w = DIM > 3 ? range_rand(-coord_range, coord_range, &prng_state_l) : HUGE_VALF;
}
best_fit[particleIndex] = fitness_val[particleIndex] = fitness(pos + particleIndexDIM);
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new position and new velocity of a given particle */
__global__ void new_vel_pos()
{
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexDIM = particleIndex * (DIMVEC) + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles || particleIndexDIM >= num_particles * DIMVEC)
return;
uint64_t prng_state_l = prng_state[particleIndex];
float velLocal, posLocal, pbest, gbest;
float best_vec_rand_coeff, global_vec_rand_coeff;
float4 velLocalV = vel[particleIndexDIM];
float4 posLocalV = pos[particleIndexDIM];
float4 best_pos_v = best_pos[particleIndexDIM];
float4 global_best_pos_v = global_best_pos[indexDIM];
// calc x
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.x*vel_omega;
posLocal = posLocalV.x;
pbest = (best_pos_v.x - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.x - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.x = posLocal;
velLocalV.x = velLocal;
// calc y
velLocal = velLocalV.y*vel_omega;
posLocal = posLocalV.y;
pbest = (best_pos_v.y - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.y - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.y = posLocal;
velLocalV.y = velLocal;
if(DIM > 2){
// calc z
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.z*vel_omega;
posLocal = posLocalV.z;
pbest = (best_pos_v.z - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.z - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.z = posLocal;
velLocalV.z = velLocal;
}else{
posLocalV.z = HUGE_VALF;
velLocalV.z = HUGE_VALF;
}
if(DIM > 3){
//calc w
best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
velLocal = velLocalV.w*vel_omega;
posLocal = posLocalV.w;
pbest = (best_pos_v.w - posLocal) * best_vec_rand_coeff*vel_phi_best;
gbest = (global_best_pos_v.w - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
posLocalV.w = posLocal;
velLocalV.w = velLocal;
}else{
posLocalV.w = HUGE_VALF;
velLocalV.w = HUGE_VALF;
}
pos[particleIndexDIM] = posLocalV;
vel[particleIndexDIM] = velLocalV;
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new fitness val of a given particle */
__global__ void calc_fitness()
{
extern __shared__ float2 smpos[];
int particleIndexSHM = threadIdx.y * blockDim.x + threadIdx.x;
int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
int particleIndexDIM = particleIndex * (DIMVEC) + threadIdx.x;
int indexDIM = threadIdx.x;
if(particleIndex >= num_particles || particleIndexDIM >= num_particles * DIMVEC)
return;
float4 posLocalV = pos[particleIndexDIM];
float4 targetLocal = target_pos_shared[indexDIM];
smpos[particleIndexSHM].x =
(posLocalV.x - targetLocal.x)*(posLocalV.x - targetLocal.x)+
(posLocalV.y - targetLocal.y)*(posLocalV.y - targetLocal.y);
smpos[particleIndexSHM].x += posLocalV.z != HUGE_VALF ? (posLocalV.z - targetLocal.z)*(posLocalV.z - targetLocal.z) : 0;
smpos[particleIndexSHM].x += posLocalV.w != HUGE_VALF ? (posLocalV.w - targetLocal.w)*(posLocalV.w - targetLocal.w) : 0;
smpos[particleIndexSHM].y = (posLocalV.x*posLocalV.x)+
(posLocalV.y*posLocalV.y);
smpos[particleIndexSHM].y += posLocalV.z != HUGE_VALF ? (posLocalV.z*posLocalV.z) : 0;
smpos[particleIndexSHM].y += posLocalV.w != HUGE_VALF ? (posLocalV.w*posLocalV.w) : 0;
warp_control_float2(smpos,particleIndexSHM, indexDIM);
if (indexDIM==0){
fitness_val[particleIndex] = smpos[particleIndexSHM].x*(100*smpos[particleIndexSHM].y+1)/10;
}
__syncthreads();
if (fitness_val[particleIndex] < best_fit[particleIndex]) {
best_fit[particleIndex] = fitness_val[particleIndex];
memcpy(best_pos + particleIndexDIM,pos + particleIndexDIM,sizeof(float4));
}
}
/* Host driver for the find_min_fitness_parallel kernel; it splits the reduction into passes so the shared memory never overflows */
void parallel_fitness(const int n_particle, int n_thread){
int shmdim;
fitness_pos *fitness_device_out,*fitness_device_in = NULL;
int last_n_block;
int offset;
int blocks = n_particle;
int max_parallel_particle_iteration = SHARED_MEMORY_DIM / sizeof(fitness_pos);
int iteration;
int max_blocks_per_iteration = max_parallel_particle_iteration / n_thread;
while(blocks != 1){
offset = 0;
last_n_block = blocks;
blocks = ceil((float)blocks / n_thread);
if(blocks == 1){
n_thread = ceil_log2(last_n_block);
}
cudaMalloc(&fitness_device_out, sizeof(fitness_pos) * blocks);
shmdim = n_thread*sizeof(fitness_pos);
if(max_parallel_particle_iteration < last_n_block && 0){ // NOTE: the trailing "&& 0" keeps this chunked-launch path disabled
iteration = 0;
while(iteration + max_parallel_particle_iteration < blocks*n_thread){
find_min_fitness_parallel<<<max_blocks_per_iteration, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
iteration += max_parallel_particle_iteration;
offset += (max_parallel_particle_iteration/n_thread);
}
int x = (blocks*n_thread) - (offset*n_thread);
x = ceil((float)x / n_thread);
find_min_fitness_parallel<<<x, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
}else{
find_min_fitness_parallel<<<blocks, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block,blocks);
}
if(fitness_device_in != NULL){
check_error(cudaFree(fitness_device_in),"free fitness_device_in");
}
fitness_device_in = fitness_device_out;
}
fitness_device_out = (fitness_pos*)malloc(sizeof(fitness_pos));
check_error(cudaMemcpy(fitness_device_out, fitness_device_in, sizeof(fitness_pos),cudaMemcpyDeviceToHost),"copy fitness_min");
fitness_min = fitness_device_out->fitness;
check_error(cudaFree(fitness_device_in),"free fitness_device_in");
free(fitness_device_out);
}
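/* Usage sketch (hypothetical host-side call; the particle-count variable and
 * the thread count of 128 are assumptions, not taken from this file):
 * parallel_fitness(n_particle, 128);
 * When it returns, fitness_min holds the minimum fitness of this step and the
 * last kernel pass has already updated the global best fitness/position on the
 * device. */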
/* Kernel function to compute the new global min fitness */
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks){
extern __shared__ fitness_pos sm[];
const int tid=threadIdx.x;
const int i=(blockIdx.x*blockDim.x+threadIdx.x) + (offset*blockDim.x);
int stride;
sm[tid].fitness = HUGE_VALF;
// Out-of-range threads must not return here: they still have to reach the
// __syncthreads() calls in the reduction loop below, so they simply keep the
// HUGE_VALF sentinel and can never win the minimum.
if(i < num_particles && i < n_in){
if(in != NULL){
sm[tid] = in[i];
}else{
sm[tid].fitness = fitness_val[i];
sm[tid].pos = i;
}
}
// min-reduction in shared memory: keep the entry with the smaller fitness at each stride
for (stride = blockDim.x/2;stride>0;stride>>=1)
{
__syncthreads();
if (tid<stride && sm[tid].fitness > sm[tid+stride].fitness){
sm[tid] = sm[tid+stride];
}
}
if (tid==0){
out[blockIdx.x+offset]=sm[0];//copy back
if(blocks == 1){
current_fitness = sm[0].fitness;
memcpy(current_best_pos,pos+sm[0].pos*DIMVEC,sizeof(float4)*DIMVEC);
if (sm[0].fitness < global_fitness) {
global_fitness = sm[0].fitness;
memcpy(global_best_pos,current_best_pos,sizeof(float4)*DIMVEC);
}
}
}
}
void start_time_record(cudaEvent_t *before, cudaEvent_t *after){
check_error(cudaEventCreate(&(*before)),"create cudaEvent before");
check_error(cudaEventCreate(&(*after)),"create cudaEvent after");
check_error(cudaEventRecord(*before),"record cudaEvent before");
}
void stop_time_record(cudaEvent_t *before, cudaEvent_t *after, float *runtime){
check_error(cudaEventRecord(*after),"record cudaEvent after");
check_error(cudaEventSynchronize(*after),"synch cudaEvent after");
check_error(cudaEventElapsedTime(runtime, *before, *after),"calc cudaEvent elapsed time");
}
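/* Usage sketch for the two timing helpers above:
 * cudaEvent_t before, after; float ms;
 * start_time_record(&before, &after);
 * // ... kernel launches to be timed ...
 * stop_time_record(&before, &after, &ms); // ms holds the elapsed time in milliseconds
 */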
void check_error(cudaError_t err, const char *msg)
{
if (err != cudaSuccess) {
fprintf(stderr, "%s : errore %d (%s)\n",
msg, err, cudaGetErrorString(err));
exit(err);
}
}
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM)
{
__syncthreads();
#if DIM > 1
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM].x += smpos[particleIndexSHM+stride].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM+stride].y;
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 32
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 16].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 16].y;
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 8].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 8].y;
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 4].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 4].y;
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 2].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 2].y;
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 1].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 1].y;
__syncthreads();
#endif
}
#endif
}
/* returns the smallest power of two that is >= x, i.e. 1 << ceil(log2(x)); used above in parallel_fitness to round the final reduction's thread count up to a power of two */
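/* Examples: ceil_log2(1) == 1, ceil_log2(5) == 8, ceil_log2(8) == 8. */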
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return 1<<y;
}
|
2721ba57fb15d6ffcc1c59fd8f998b6c61fc5cbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SART_cuda.h" // consists all required package and functions
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define PROJ prhs[1]
#define GEO_PARA prhs[2]
#define ITER_PARA prhs[3]
#define OUT_IMG plhs[0]
#define OUT_ERR plhs[1]
int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj;
float da, db, ai, bi, SO, SD, dx;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
numImg = nx * ny * nz; // size of image
numBytesImg = numImg * sizeof(float); // number of bytes in image
// detector plane resolutions
if (mxGetField(GEO_PARA, 0, "na") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na"));
else if (mxGetField(GEO_PARA, 0, "nv") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector in plane, which is denoted as na or nu.\n");
if (mxGetField(GEO_PARA, 0, "nb") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb"));
else if (mxGetField(GEO_PARA, 0, "nu") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector across plane, which is denoted as nb or nv.\n");
numSingleProj = na * nb;
numBytesSingleProj = numSingleProj * sizeof(float);
// voxel resolution dx, which is also the scaling factor of the whole system
if (mxGetField(GEO_PARA, 0, "dx") != NULL)
dx = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "dx"));
else{
dx = 1.0f;
mexPrintf("Automatically set voxel size dx to 1. \n");
mexPrintf("If don't want that default value, please set para.dx manually.\n");
}
// detector resolution
if (mxGetField(GEO_PARA, 0, "da") != NULL)
da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da"));
else{
da = 1.0f;
mexPrintf("Automatically set detector cell size da to 1. \n");
mexPrintf("If don't want that default value, please set para.da manually.\n");
}
if (mxGetField(GEO_PARA, 0, "db") != NULL)
db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db"));
else{
db = 1.0f;
mexPrintf("Automatically set detectof cell size db to 1. \n");
mexPrintf("If don't want that default value, please set para.db manually.\n");
}
// detector plane offset from centered calibrations
if (mxGetField(GEO_PARA, 0, "ai") != NULL){
ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai"));
ai -= ((float)na / 2 - 0.5f);
}
else{
mexPrintf("Automatically set detector offset ai to 0. \n");
mexPrintf("If don't want that default value, please set para.ai manually.\n");
ai = - ((float)na / 2 - 0.5f);
}
if (mxGetField(GEO_PARA, 0, "bi") != NULL){
bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi"));
bi -= ((float)nb / 2 - 0.5f);
}
else{
mexPrintf("Automatically set detector offset bi to 0. \n");
mexPrintf("If don't want that default value, please set para.bi manually.\n");
bi = - ((float)nb / 2 - 0.5f);
}
if (mxGetField(GEO_PARA, 0, "SO") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO"));
else if (mxGetField(GEO_PARA, 0, "SI") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and isocenter, which is denoted with para.SO or para.DI.\n");
if (mxGetField(GEO_PARA, 0, "SD") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD"));
else if (mxGetField(GEO_PARA, 0, "DI") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO;
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and detector plane, which is denoted with para.SD or para.SI + para.DI.\n");
// load iterating parameters, for the whole bin
int n_iter, n_iter_invertDVF;
if (mxGetField(ITER_PARA, 0, "n_iter") != NULL)
n_iter = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_iter")); // number of views in this bin
else{
n_iter = 1;
mexPrintf("Automatically set number of iterations to 1. \n");
mexPrintf("If don't want that default value, please set iter_para.n_iter manually.\n");
}
if (mxGetField(ITER_PARA, 0, "n_iter_invertDVF") != NULL)
n_iter_invertDVF = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_iter_invertDVF"));
else{
n_iter_invertDVF = 10;
mexPrintf("Automatically set number of iterations for inverting DVF to 10. \n");
mexPrintf("If don't want that default value, please set iter_para.n_iter_invertDVF manually.\n");
}
int n_bin, *n_views, numProj, numBytesProj, N_view; // number of bins, numbers of views of bins, and the index view of each bin.
// e.g. suppose we have 3 bins and 10 total views, and the bins hold 1, 3 and 6 views respectively. Then n_views is set to {0, 1, 4, 10}, i.e. the starting view index of each bin plus the total number of views as the last entry. The volumes and flows must be arranged in the same view order.
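// Concretely, the views of bin ibin are i_view = n_views[ibin] .. n_views[ibin+1]-1, which is exactly how the per-view loop of the reconstruction iterates below.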
if (mxGetField(ITER_PARA, 0, "n_bin") != NULL)
n_bin = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_bin"));
else{
n_bin = 8;
mexPrintf("Automatically set number of bins to 8. \n");
mexPrintf("If don't want that default value, please set iter_para.n_bin manually.\n");
}
if (mxGetField(ITER_PARA, 0, "n_views") != NULL)
n_views = (int*)mxGetData(mxGetField(ITER_PARA, 0, "n_views"));
else{
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number bins, which is denoted as iter_para.n_views.\n");
}
N_view = n_views[n_bin];
// 5D models
float *h_alpha_x, *h_alpha_y, *h_alpha_z, *h_beta_x, *h_beta_y, *h_beta_z, *angles, lambda;
// load 5DCT alpha and beta
if (mxGetField(ITER_PARA, 0, "alpha_x") != NULL)
h_alpha_x = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_x"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_x.\n");
if (mxGetField(ITER_PARA, 0, "alpha_y") != NULL)
h_alpha_y = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_y"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_y.\n");
if (mxGetField(ITER_PARA, 0, "alpha_z") != NULL)
h_alpha_z = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_z"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_z.\n");
if (mxGetField(ITER_PARA, 0, "beta_x") != NULL)
h_beta_x = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_x"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_x.\n");
if (mxGetField(ITER_PARA, 0, "beta_y") != NULL)
h_beta_y = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_y"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_y.\n");
if (mxGetField(ITER_PARA, 0, "beta_z") != NULL)
h_beta_z = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_z"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_z.\n");
// load 5DCT parameters volume (v) and flow (f)
float *volumes, *flows, *ref_volumes, *ref_flows;
if (mxGetField(ITER_PARA, 0, "volumes") != NULL)
volumes= (float*)mxGetData(mxGetField(ITER_PARA, 0, "volumes"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume in iter_para.volumes.\n");
if (mxGetField(ITER_PARA, 0, "flows") != NULL)
flows = (float*)mxGetData(mxGetField(ITER_PARA, 0, "flows"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid flow in iter_para.flows.\n");
if (mxGetField(ITER_PARA, 0, "volume0") != NULL)
ref_volumes = (float*)mxGetData(mxGetField(ITER_PARA, 0, "volume0"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid referenced volume in iter_para.volume0.\n");
if (mxGetField(ITER_PARA, 0, "flow0") != NULL)
ref_flows = (float*)mxGetData(mxGetField(ITER_PARA, 0, "flow0"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid referenced flow in iter_para.flow0.\n");
if (mxGetField(ITER_PARA, 0, "angles") != NULL)
angles = (float*)mxGetData(mxGetField(ITER_PARA, 0, "angles"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid angles iter_para.angles.\n");
if (mxGetField(ITER_PARA, 0, "lambda") != NULL)
lambda = (float)mxGetScalar(mxGetField(ITER_PARA, 0, "lambda"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid coefficience iter_para.lambda.\n");
numProj = numSingleProj * N_view;
numBytesProj = numProj * sizeof(float);
// load initial guess of image
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
// load true projection value
float *h_proj;
h_proj = (float*)mxGetData(PROJ);
// define thread distributions
const dim3 gridSize_img((nx + BLOCKWIDTH - 1) / BLOCKWIDTH, (ny + BLOCKHEIGHT - 1) / BLOCKHEIGHT, (nz + BLOCKDEPTH - 1) / BLOCKDEPTH);
const dim3 gridSize_singleProj((na + BLOCKWIDTH - 1) / BLOCKWIDTH, (nb + BLOCKHEIGHT - 1) / BLOCKHEIGHT, 1);
const dim3 blockSize(BLOCKWIDTH,BLOCKHEIGHT, BLOCKDEPTH);
// CUDA 3DArray Malloc parameters
struct hipExtent extent_img = make_hipExtent(nx, ny, nz);
struct hipExtent extent_singleProj = make_hipExtent(na, nb, 1);
//Allocate CUDA array in device memory of 5DCT matrices: alpha and beta
hipArray *d_alpha_x, *d_alpha_y, *d_alpha_z, *d_beta_x, *d_beta_y, *d_beta_z;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipError_t cudaStat;
// alpha_x
cudaStat = hipMalloc3DArray(&d_alpha_x, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for alpha_x failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// alpha_y
cudaStat = hipMalloc3DArray(&d_alpha_y, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for alpha_y failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// alpha_z
cudaStat = hipMalloc3DArray(&d_alpha_z, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for alpha_z failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_x
cudaStat = hipMalloc3DArray(&d_beta_x, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for beta_x failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_y
cudaStat = hipMalloc3DArray(&d_beta_y, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for beta_y failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_z
cudaStat = hipMalloc3DArray(&d_beta_z, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for beta_z failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// Get pitched pointer to alpha and beta in host memory
hipPitchedPtr hp_alpha_x = make_hipPitchedPtr((void*) h_alpha_x, nx * sizeof(float), nx, ny);
hipPitchedPtr hp_alpha_y = make_hipPitchedPtr((void*) h_alpha_y, nx * sizeof(float), nx, ny);
hipPitchedPtr hp_alpha_z = make_hipPitchedPtr((void*) h_alpha_z, nx * sizeof(float), nx, ny);
hipPitchedPtr hp_beta_x = make_hipPitchedPtr((void*) h_beta_x, nx * sizeof(float), nx, ny);
hipPitchedPtr hp_beta_y = make_hipPitchedPtr((void*) h_beta_y, nx * sizeof(float), nx, ny);
hipPitchedPtr hp_beta_z = make_hipPitchedPtr((void*) h_beta_z, nx * sizeof(float), nx, ny);
// Copy alpha and beta to texture memory from pitched pointer
hipMemcpy3DParms copyParams = {0};
copyParams.extent = extent_img;
copyParams.kind = hipMemcpyHostToDevice;
//alpha_x
copyParams.srcPtr = hp_alpha_x;
copyParams.dstArray = d_alpha_x;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy alpha_x to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//alpha_y
copyParams.srcPtr = hp_alpha_y;
copyParams.dstArray = d_alpha_y;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy alpha_y to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//alpha_z
copyParams.srcPtr = hp_alpha_z;
copyParams.dstArray = d_alpha_z;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy alpha_z to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_x
copyParams.srcPtr = hp_beta_x;
copyParams.dstArray = d_beta_x;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy beta_x to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_y
copyParams.srcPtr = hp_beta_y;
copyParams.dstArray = d_beta_y;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy beta_y to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_z
copyParams.srcPtr = hp_beta_z;
copyParams.dstArray = d_beta_z;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy beta_z to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// create texture object alpha and beta
hipResourceDesc resDesc;
hipTextureDesc texDesc, texDesc2;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.addressMode[2] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 0;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.addressMode[0] = hipAddressModeClamp;
texDesc2.addressMode[1] = hipAddressModeClamp;
texDesc2.addressMode[2] = hipAddressModeClamp;
texDesc2.filterMode = hipFilterModePoint;
texDesc2.readMode = hipReadModeElementType;
texDesc2.normalizedCoords = 0;
// alpha_x
resDesc.res.array.array = d_alpha_x;
hipTextureObject_t tex_alpha_x = 0;
hipCreateTextureObject(&tex_alpha_x, &resDesc, &texDesc, NULL);
// alpha_y
resDesc.res.array.array = d_alpha_y;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = hipAddressModeClamp;
// texDesc.addressMode[1] = hipAddressModeClamp;
// texDesc.addressMode[2] = hipAddressModeClamp;
// texDesc.filterMode = hipFilterModeLinear;
// texDesc.readMode = hipReadModeElementType;
// texDesc.normalizedCoords = 0;
hipTextureObject_t tex_alpha_y = 0;
hipCreateTextureObject(&tex_alpha_y, &resDesc, &texDesc, NULL);
// alpha_z
resDesc.res.array.array = d_alpha_z;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = hipAddressModeClamp;
// texDesc.addressMode[1] = hipAddressModeClamp;
// texDesc.addressMode[2] = hipAddressModeClamp;
// texDesc.filterMode = hipFilterModeLinear;
// texDesc.readMode = hipReadModeElementType;
// texDesc.normalizedCoords = 0;
hipTextureObject_t tex_alpha_z = 0;
hipCreateTextureObject(&tex_alpha_z, &resDesc, &texDesc, NULL);
// beta_x
resDesc.res.array.array = d_beta_x;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = hipAddressModeClamp;
// texDesc.addressMode[1] = hipAddressModeClamp;
// texDesc.addressMode[2] = hipAddressModeClamp;
// texDesc.filterMode = hipFilterModeLinear;
// texDesc.readMode = hipReadModeElementType;
// texDesc.normalizedCoords = 0;
hipTextureObject_t tex_beta_x = 0;
hipCreateTextureObject(&tex_beta_x, &resDesc, &texDesc, NULL);
// beta_y
resDesc.res.array.array = d_beta_y;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = hipAddressModeClamp;
// texDesc.addressMode[1] = hipAddressModeClamp;
// texDesc.addressMode[2] = hipAddressModeClamp;
// texDesc.filterMode = hipFilterModeLinear;
// texDesc.readMode = hipReadModeElementType;
// texDesc.normalizedCoords = 0;
hipTextureObject_t tex_beta_y = 0;
hipCreateTextureObject(&tex_beta_y, &resDesc, &texDesc, NULL);
// beta_z
resDesc.res.array.array = d_beta_z;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = hipAddressModeClamp;
// texDesc.addressMode[1] = hipAddressModeClamp;
// texDesc.addressMode[2] = hipAddressModeClamp;
// texDesc.filterMode = hipFilterModeLinear;
// texDesc.readMode = hipReadModeElementType;
// texDesc.normalizedCoords = 0;
hipTextureObject_t tex_beta_z = 0;
hipCreateTextureObject(&tex_beta_z, &resDesc, &texDesc, NULL);
// malloc in device: measured projection buffer for a single view (filled from h_proj for each view)
float *d_proj;
hipMalloc((void**)&d_proj, numBytesSingleProj);
// copy to device: projection of the whole bin
// hipMemcpy(d_proj, h_proj, numBytesProj, hipMemcpyHostToDevice);
// malloc in device: another projection pointer, with single view size
float *d_singleViewProj2;
hipMalloc((void**)&d_singleViewProj2, numBytesSingleProj);
// malloc in device: image volumes, one per bin
float *d_img ,*d_img1;
hipArray *array_img;
hipMalloc((void**)&d_img, numBytesImg * n_bin);
hipMalloc((void**)&d_img1, numBytesImg);
cudaStat = hipMalloc3DArray(&array_img, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_img failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// malloc in device: another image pointer, for single view
float *d_singleViewImg1, *d_singleViewImg2, *d_imgOnes;
hipMalloc(&d_singleViewImg1, numBytesImg);
hipMalloc(&d_singleViewImg2, numBytesImg);
hipMalloc(&d_imgOnes, numBytesImg);
float angle, volume, flow;
//Malloc forward and inverted DVFs in device
float *d_mx, *d_my, *d_mz, *d_mx2, *d_my2, *d_mz2;
hipMalloc(&d_mx, numBytesImg);
hipMalloc(&d_my, numBytesImg);
hipMalloc(&d_mz, numBytesImg);
hipMalloc(&d_mx2, numBytesImg);
hipMalloc(&d_my2, numBytesImg);
hipMalloc(&d_mz2, numBytesImg);
// Alloc forward and inverted DVFs in device, in form of array memory
hipArray *array_mx, *array_my, *array_mz, *array_mx2, *array_my2, *array_mz2;
cudaStat = hipMalloc3DArray(&array_mx, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_mx failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = hipMalloc3DArray(&array_my, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_my failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = hipMalloc3DArray(&array_mz, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_mz failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = hipMalloc3DArray(&array_mx2, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_mx2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = hipMalloc3DArray(&array_my2, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_my2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = hipMalloc3DArray(&array_mz2, &channelDesc, extent_img);
if (cudaStat != hipSuccess) {
mexPrintf("Array memory allocation for array_mz2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// define tex_mx etc
hipTextureObject_t tex_mx = 0, tex_my = 0, tex_mz = 0, tex_mx2 = 0, tex_my2 = 0, tex_mz2 = 0, tex_img = 0;
// setup output images
OUT_IMG = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[4] = {(mwSize)nx, (mwSize)ny, (mwSize)nz, (mwSize)n_bin};
mxSetDimensions(OUT_IMG, outDim, 4);
mxSetData(OUT_IMG, mxMalloc(numBytesImg * n_bin));
float *h_outimg = (float*)mxGetData(OUT_IMG);
OUT_ERR = mxCreateNumericMatrix(n_iter, 1, mxSINGLE_CLASS, mxREAL);
float *h_outerr = (float*)mxGetData(OUT_ERR), temp_err[1];
hipblasHandle_t handle;
hipblasCreate(&handle); // the handle must be created before hipblasSnrm2 is called in the iteration loop below
copyParams.kind = hipMemcpyDeviceToDevice;
// for (int i = 0; i < n_iter; i++)
// mexPrintf("=");mexEvalString("drawnow;");
// mexPrintf("\n");mexEvalString("drawnow;");
hipMemcpy(d_img, h_img, numBytesImg * n_bin, hipMemcpyHostToDevice);
for (int iter = 0; iter < n_iter; iter++){ // iteration
// mexPrintf("x");mexEvalString("drawnow;");
for (int ibin = 0; ibin < n_bin; ibin++){
processBar(iter, n_iter, ibin, n_bin);
// initial guesses of each bin
// mexPrintf("Dealing with ibin = %d.\n", ibin); mexEvalString("drawnow;");
// if (ibin < 1){
// hipMemcpy(d_img, h_img, numBytesImg, hipMemcpyHostToDevice);
// }
// else{
// // hipMemcpy(d_img1, h_img, numBytesImg, hipMemcpyHostToDevice);
// volume = ref_volumes[ibin];
// flow = ref_flows[ibin];
// hipLaunchKernelGGL(( kernel_forwardDVF), dim3(gridSize_img), dim3(blockSize), 0, 0, d_mx, d_my, d_mz, tex_alpha_x, tex_alpha_y, tex_alpha_z, tex_beta_x, tex_beta_y, tex_beta_z, volume, flow, nx, ny, nz);
// hipDeviceSynchronize();
// // copy img to pitched pointer and bind it to a texture object
// hipPitchedPtr dp_img = make_hipPitchedPtr((void*) d_img1, nx * sizeof(float), nx, ny);
// copyParams.srcPtr = dp_img;
// copyParams.dstArray = array_img;
// cudaStat = hipMemcpy3D(©Params);
// if (cudaStat != hipSuccess) {
// mexPrintf("Failed to copy dp_img to array memory array_img.\n");
// mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
// mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
// }
// resDesc.res.array.array = array_img;
// hipCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL);
// hipLaunchKernelGGL(( kernel_deformation), dim3(gridSize_img), dim3(blockSize), 0, 0, d_img, tex_img, d_mx, d_my, d_mz, nx, ny, nz);
// hipDeviceSynchronize();
// }
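/* Per-view SART step, summarizing the kernel calls below:
 * 1. build the forward DVF for this view and invert it;
 * 2. deform the bin's current image to this view's motion state;
 * 3. forward-project it and take the difference with the measured projection;
 * 4. backproject the residual and deform it back to the bin's motion state;
 * 5. normalize by the backprojection of an all-ones image and apply the
 *    relaxed update with step size lambda. */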
for (int i_view = n_views[ibin]; i_view < n_views[ibin + 1]; i_view++){ // view
angle = angles[i_view];
volume = ref_volumes[ibin] - volumes[i_view];
flow = ref_flows[ibin] - flows[i_view];
// generate forwards DVFs: d_mx, d_my, d_mz and inverted DVFs: d_mx2, d_my2, d_mz2
hipLaunchKernelGGL(( kernel_forwardDVF), dim3(gridSize_img), dim3(blockSize), 0, 0, d_mx, d_my, d_mz, tex_alpha_x, tex_alpha_y, tex_alpha_z, tex_beta_x, tex_beta_y, tex_beta_z, volume, flow, nx, ny, nz);
hipDeviceSynchronize();
// copy mx etc to pitched pointer and bind it to a texture object
hipPitchedPtr dp_mx = make_hipPitchedPtr((void*) d_mx, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mx;
copyParams.dstArray = array_mx;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_mx to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mx;
hipCreateTextureObject(&tex_mx, &resDesc, &texDesc, NULL);
hipPitchedPtr dp_my = make_hipPitchedPtr((void*) d_my, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_my;
copyParams.dstArray = array_my;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_my to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_my;
hipCreateTextureObject(&tex_my, &resDesc, &texDesc, NULL);
hipPitchedPtr dp_mz = make_hipPitchedPtr((void*) d_mz, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mz;
copyParams.dstArray = array_mz;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_mz to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mz;
hipCreateTextureObject(&tex_mz, &resDesc, &texDesc, NULL);
hipLaunchKernelGGL(( kernel_invertDVF), dim3(gridSize_img), dim3(blockSize), 0, 0, d_mx2, d_my2, d_mz2, tex_mx, tex_my, tex_mz, nx, ny, nz, n_iter_invertDVF);
hipDeviceSynchronize();
// copy mx2 etc to pitched pointer and bind it to a texture object
hipPitchedPtr dp_mx2 = make_hipPitchedPtr((void*) d_mx2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mx2;
copyParams.dstArray = array_mx2;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_mx2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mx2;
hipCreateTextureObject(&tex_mx2, &resDesc, &texDesc, NULL);
hipPitchedPtr dp_my2 = make_hipPitchedPtr((void*) d_my2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_my2;
copyParams.dstArray = array_my2;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_my2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_my2;
hipCreateTextureObject(&tex_my2, &resDesc, &texDesc, NULL);
hipPitchedPtr dp_mz2 = make_hipPitchedPtr((void*) d_mz2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mz2;
copyParams.dstArray = array_mz2;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_mz2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mz2;
hipCreateTextureObject(&tex_mz2, &resDesc, &texDesc, NULL);
// copy img to pitched pointer and bind it to a texture object
hipPitchedPtr dp_img = make_hipPitchedPtr((void*) d_img + ibin * numBytesImg, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_img;
copyParams.dstArray = array_img;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_img to array memory array_img.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_img;
hipCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL);
// deformed image for i_view, from reference image of the bin
hipLaunchKernelGGL(( kernel_deformation), dim3(gridSize_img), dim3(blockSize), 0, 0, d_singleViewImg1, tex_img, d_mx2, d_my2, d_mz2, nx, ny, nz);
hipDeviceSynchronize();
// projection of deformed image from initial guess
hipLaunchKernelGGL(( kernel_projection), dim3(gridSize_singleProj), dim3(blockSize), 0, 0, d_singleViewProj2, d_singleViewImg1, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz); // TBD
hipDeviceSynchronize();
// difference between true projection and projection from initial guess
// update d_singleViewProj2 instead of malloc a new one
hipMemcpy(d_proj, h_proj + i_view * numSingleProj, numBytesSingleProj, hipMemcpyHostToDevice);
// mexPrintf("i_view = %d.\n", i_view);mexEvalString("drawnow;");
hipLaunchKernelGGL(( kernel_add), dim3(gridSize_singleProj), dim3(blockSize), 0, 0, d_singleViewProj2, d_proj, 0, na, nb, -1);
hipDeviceSynchronize();
hipblasSnrm2(handle, na * nb, d_singleViewProj2, 1, temp_err);
h_outerr[iter] += temp_err[0];
// backprojecting the difference of projections
// print parameters
kernel_backprojection(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// copy img to pitched pointer and bind it to a texture object
dp_img = make_hipPitchedPtr((void*) d_singleViewImg1, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_img;
copyParams.dstArray = array_img;
cudaStat = hipMemcpy3D(©Params);
if (cudaStat != hipSuccess) {
mexPrintf("Failed to copy dp_img to array memory array_img.\n");
mexPrintf("Error code %d: %s\n",cudaStat,hipGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_img;
hipCreateTextureObject(&tex_img, &resDesc, &texDesc2, NULL);
hipLaunchKernelGGL(( kernel_invertDVF), dim3(gridSize_img), dim3(blockSize), 0, 0, d_mx, d_my, d_mz, tex_mx2, tex_my2, tex_mz2, nx, ny, nz, n_iter_invertDVF);
hipDeviceSynchronize();
// deform backprojection back to the bin
hipLaunchKernelGGL(( kernel_deformation), dim3(gridSize_img), dim3(blockSize), 0, 0, d_singleViewImg2, tex_img, d_mx, d_my, d_mz, nx, ny, nz);
hipDeviceSynchronize();
// mexPrintf("8");mexEvalString("drawnow;");
// calculate the ones backprojection data
hipLaunchKernelGGL(( kernel_initial), dim3(gridSize_img), dim3(blockSize), 0, 0, d_singleViewImg1, nx, ny, nz, 1);
hipDeviceSynchronize();
// mexPrintf("9");mexEvalString("drawnow;");
hipLaunchKernelGGL(( kernel_projection), dim3(gridSize_singleProj), dim3(blockSize), 0, 0, d_singleViewProj2, d_singleViewImg1, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
hipDeviceSynchronize();
// mexPrintf("10");mexEvalString("drawnow;");
// kernel_backprojection<<<gridSize_img, blockSize>>>(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// hipDeviceSynchronize();
kernel_backprojection(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// mexPrintf("11");mexEvalString("drawnow;");
// weighting
hipLaunchKernelGGL(( kernel_division), dim3(gridSize_img), dim3(blockSize), 0, 0, d_singleViewImg2, d_singleViewImg1, nx, ny, nz);
hipDeviceSynchronize();
// mexPrintf("12");mexEvalString("drawnow;");
// updating
hipLaunchKernelGGL(( kernel_update), dim3(gridSize_img), dim3(blockSize), 0, 0, d_img + ibin * numImg, d_singleViewImg2, nx, ny, nz, lambda);
hipDeviceSynchronize();
// mexPrintf("13");mexEvalString("drawnow;");
}
// if (ibin == 0){
// hipMemcpy(d_img1, d_img, numBytesImg, hipMemcpyDeviceToDevice);
// }
}
// mexPrintf("\n");mexEvalString("drawnow;");
}
hipMemcpy(h_outimg, d_img, numBytesImg * n_bin, hipMemcpyDeviceToHost);
// const mwSize *outDim = mxGetDimensions(PROJ); // IN_IMG or PROJ
// mxSetDimensions(OUT_IMG, outDim, 3);
// mxSetData(OUT_IMG, mxMalloc(numBytesImg));
// float *h_outimg = (float*)mxGetData(OUT_IMG);
// hipMemcpy(h_outimg, d_singleViewProj2, numBytesSingleProj, hipMemcpyDeviceToHost);
// const mwSize *outDim = mxGetDimensions(IN_IMG); // IN_IMG or PROJ
// mxSetDimensions(OUT_IMG, outDim, 3);
// mxSetData(OUT_IMG, mxMalloc(numBytesImg));
// float *h_outimg = (float*)mxGetData(OUT_IMG);
// hipMemcpy(h_outimg, d_singleViewImg1, numBytesImg, hipMemcpyDeviceToHost);
hipDestroyTextureObject(tex_alpha_x);
hipDestroyTextureObject(tex_alpha_y);
hipDestroyTextureObject(tex_alpha_z);
hipDestroyTextureObject(tex_beta_x);
hipDestroyTextureObject(tex_beta_y);
hipDestroyTextureObject(tex_beta_z);
hipDestroyTextureObject(tex_img);
hipDestroyTextureObject(tex_mx);
hipDestroyTextureObject(tex_my);
hipDestroyTextureObject(tex_mz);
hipDestroyTextureObject(tex_mx2);
hipDestroyTextureObject(tex_my2);
hipDestroyTextureObject(tex_mz2);
hipFreeArray(d_alpha_x);
hipFreeArray(d_alpha_y);
hipFreeArray(d_alpha_z);
hipFreeArray(d_beta_x);
hipFreeArray(d_beta_y);
hipFreeArray(d_beta_z);
// hipFreeArray(d_img);
hipFree(d_mx);
hipFree(d_my);
hipFree(d_mz);
hipFree(d_mx2);
hipFree(d_my2);
hipFree(d_mz2);
hipFreeArray(array_mx);
hipFreeArray(array_my);
hipFreeArray(array_mz);
hipFreeArray(array_mx2);
hipFreeArray(array_my2);
hipFreeArray(array_mz2);
hipFree(d_proj);
hipFree(d_singleViewImg1);
hipFree(d_singleViewImg2);
hipFree(d_singleViewProj2);
hipFree(d_img);
hipFree(d_img1);
hipDeviceReset();
return;
}
| 2721ba57fb15d6ffcc1c59fd8f998b6c61fc5cbc.cu | #include "SART_cuda.h" // consists all required package and functions
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define PROJ prhs[1]
#define GEO_PARA prhs[2]
#define ITER_PARA prhs[3]
#define OUT_IMG plhs[0]
#define OUT_ERR plhs[1]
int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj;
float da, db, ai, bi, SO, SD, dx;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
numImg = nx * ny * nz; // size of image
numBytesImg = numImg * sizeof(float); // number of bytes in image
// detector plane resolutions
if (mxGetField(GEO_PARA, 0, "na") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na"));
else if (mxGetField(GEO_PARA, 0, "nv") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector in plane, which is denoted as na or nu.\n");
if (mxGetField(GEO_PARA, 0, "nb") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb"));
else if (mxGetField(GEO_PARA, 0, "nu") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector across plane, which is denoted as nb or nv.\n");
numSingleProj = na * nb;
numBytesSingleProj = numSingleProj * sizeof(float);
// voxel resolution dx, which is also the scaling factor of the whole system
if (mxGetField(GEO_PARA, 0, "dx") != NULL)
dx = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "dx"));
else{
dx = 1.0f;
mexPrintf("Automatically set voxel size dx to 1. \n");
mexPrintf("If don't want that default value, please set para.dx manually.\n");
}
// detector resolution
if (mxGetField(GEO_PARA, 0, "da") != NULL)
da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da"));
else{
da = 1.0f;
mexPrintf("Automatically set detector cell size da to 1. \n");
mexPrintf("If don't want that default value, please set para.da manually.\n");
}
if (mxGetField(GEO_PARA, 0, "db") != NULL)
db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db"));
else{
db = 1.0f;
mexPrintf("Automatically set detectof cell size db to 1. \n");
mexPrintf("If don't want that default value, please set para.db manually.\n");
}
// detector plane offset from centered calibrations
if (mxGetField(GEO_PARA, 0, "ai") != NULL){
ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai"));
ai -= ((float)na / 2 - 0.5f);
}
else{
mexPrintf("Automatically set detector offset ai to 0. \n");
mexPrintf("If don't want that default value, please set para.ai manually.\n");
ai = - ((float)na / 2 - 0.5f);
}
if (mxGetField(GEO_PARA, 0, "bi") != NULL){
bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi"));
bi -= ((float)nb / 2 - 0.5f);
}
else{
mexPrintf("Automatically set detector offset bi to 0. \n");
mexPrintf("If don't want that default value, please set para.bi manually.\n");
bi = - ((float)nb / 2 - 0.5f);
}
if (mxGetField(GEO_PARA, 0, "SO") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO"));
else if (mxGetField(GEO_PARA, 0, "SI") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and isocenter, which is denoted with para.SO or para.DI.\n");
if (mxGetField(GEO_PARA, 0, "SD") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD"));
else if (mxGetField(GEO_PARA, 0, "DI") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO;
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and detector plane, which is denoted with para.SD or para.SI + para.DI.\n");
// load iterating parameters, for the whole bin
int n_iter, n_iter_invertDVF;
if (mxGetField(ITER_PARA, 0, "n_iter") != NULL)
n_iter = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_iter")); // number of views in this bin
else{
n_iter = 1;
mexPrintf("Automatically set number of iterations to 1. \n");
mexPrintf("If don't want that default value, please set iter_para.n_iter manually.\n");
}
if (mxGetField(ITER_PARA, 0, "n_iter_invertDVF") != NULL)
n_iter_invertDVF = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_iter_invertDVF"));
else{
n_iter_invertDVF = 10;
mexPrintf("Automatically set number of iterations for inverting DVF to 10. \n");
mexPrintf("If don't want that default value, please set iter_para.n_iter_invertDVF manually.\n");
}
int n_bin, *n_views, numProj, numBytesProj, N_view; // number of bins, numbers of views of bins, and the index view of each bin.
// e.g. suppose we have 3 bins and 10 total views, and the bins hold 1, 3 and 6 views respectively. Then n_views is set to {0, 1, 4, 10}, i.e. the starting view index of each bin plus the total number of views as the last entry. The volumes and flows must be arranged in the same view order.
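// Concretely, the views of bin ibin are i_view = n_views[ibin] .. n_views[ibin+1]-1, which is exactly how the per-view loop of the reconstruction iterates below.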
if (mxGetField(ITER_PARA, 0, "n_bin") != NULL)
n_bin = (int)mxGetScalar(mxGetField(ITER_PARA, 0, "n_bin"));
else{
n_bin = 8;
mexPrintf("Automatically set number of bins to 8. \n");
mexPrintf("If don't want that default value, please set iter_para.n_bin manually.\n");
}
if (mxGetField(ITER_PARA, 0, "n_views") != NULL)
n_views = (int*)mxGetData(mxGetField(ITER_PARA, 0, "n_views"));
else{
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number bins, which is denoted as iter_para.n_views.\n");
}
N_view = n_views[n_bin];
// 5D models
float *h_alpha_x, *h_alpha_y, *h_alpha_z, *h_beta_x, *h_beta_y, *h_beta_z, *angles, lambda;
// load 5DCT alpha and beta
if (mxGetField(ITER_PARA, 0, "alpha_x") != NULL)
h_alpha_x = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_x"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_x.\n");
if (mxGetField(ITER_PARA, 0, "alpha_y") != NULL)
h_alpha_y = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_y"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_y.\n");
if (mxGetField(ITER_PARA, 0, "alpha_z") != NULL)
h_alpha_z = (float*)mxGetData(mxGetField(ITER_PARA, 0, "alpha_z"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.alpha_z.\n");
if (mxGetField(ITER_PARA, 0, "beta_x") != NULL)
h_beta_x = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_x"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_x.\n");
if (mxGetField(ITER_PARA, 0, "beta_y") != NULL)
h_beta_y = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_y"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_y.\n");
if (mxGetField(ITER_PARA, 0, "beta_z") != NULL)
h_beta_z = (float*)mxGetData(mxGetField(ITER_PARA, 0, "beta_z"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid iter_para.beta_z.\n");
// load 5DCT parameters volume (v) and flow (f)
float *volumes, *flows, *ref_volumes, *ref_flows;
if (mxGetField(ITER_PARA, 0, "volumes") != NULL)
volumes= (float*)mxGetData(mxGetField(ITER_PARA, 0, "volumes"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume in iter_para.volumes.\n");
if (mxGetField(ITER_PARA, 0, "flows") != NULL)
flows = (float*)mxGetData(mxGetField(ITER_PARA, 0, "flows"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid flow in iter_para.flows.\n");
if (mxGetField(ITER_PARA, 0, "volume0") != NULL)
ref_volumes = (float*)mxGetData(mxGetField(ITER_PARA, 0, "volume0"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid referenced volume in iter_para.volume0.\n");
if (mxGetField(ITER_PARA, 0, "flow0") != NULL)
ref_flows = (float*)mxGetData(mxGetField(ITER_PARA, 0, "flow0"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid referenced flow in iter_para.flow0.\n");
if (mxGetField(ITER_PARA, 0, "angles") != NULL)
angles = (float*)mxGetData(mxGetField(ITER_PARA, 0, "angles"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid angles iter_para.angles.\n");
if (mxGetField(ITER_PARA, 0, "lambda") != NULL)
lambda = (float)mxGetScalar(mxGetField(ITER_PARA, 0, "lambda"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid coefficience iter_para.lambda.\n");
numProj = numSingleProj * N_view;
numBytesProj = numProj * sizeof(float);
// load initial guess of image
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
// load true projection value
float *h_proj;
h_proj = (float*)mxGetData(PROJ);
// define thread distributions
const dim3 gridSize_img((nx + BLOCKWIDTH - 1) / BLOCKWIDTH, (ny + BLOCKHEIGHT - 1) / BLOCKHEIGHT, (nz + BLOCKDEPTH - 1) / BLOCKDEPTH);
const dim3 gridSize_singleProj((na + BLOCKWIDTH - 1) / BLOCKWIDTH, (nb + BLOCKHEIGHT - 1) / BLOCKHEIGHT, 1);
const dim3 blockSize(BLOCKWIDTH,BLOCKHEIGHT, BLOCKDEPTH);
// CUDA 3DArray Malloc parameters
struct cudaExtent extent_img = make_cudaExtent(nx, ny, nz);
struct cudaExtent extent_singleProj = make_cudaExtent(na, nb, 1);
//Allocate CUDA array in device memory of 5DCT matrices: alpha and beta
cudaArray *d_alpha_x, *d_alpha_y, *d_alpha_z, *d_beta_x, *d_beta_y, *d_beta_z;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaError_t cudaStat;
// alpha_x
cudaStat = cudaMalloc3DArray(&d_alpha_x, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for alpha_x failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// alpha_y
cudaStat = cudaMalloc3DArray(&d_alpha_y, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for alpha_y failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// alpha_z
cudaStat = cudaMalloc3DArray(&d_alpha_z, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for alpha_z failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_x
cudaStat = cudaMalloc3DArray(&d_beta_x, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for beta_x failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_y
cudaStat = cudaMalloc3DArray(&d_beta_y, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for beta_y failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// beta_z
cudaStat = cudaMalloc3DArray(&d_beta_z, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for beta_z failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// Get pitched pointer to alpha and beta in host memory
cudaPitchedPtr hp_alpha_x = make_cudaPitchedPtr((void*) h_alpha_x, nx * sizeof(float), nx, ny);
cudaPitchedPtr hp_alpha_y = make_cudaPitchedPtr((void*) h_alpha_y, nx * sizeof(float), nx, ny);
cudaPitchedPtr hp_alpha_z = make_cudaPitchedPtr((void*) h_alpha_z, nx * sizeof(float), nx, ny);
cudaPitchedPtr hp_beta_x = make_cudaPitchedPtr((void*) h_beta_x, nx * sizeof(float), nx, ny);
cudaPitchedPtr hp_beta_y = make_cudaPitchedPtr((void*) h_beta_y, nx * sizeof(float), nx, ny);
cudaPitchedPtr hp_beta_z = make_cudaPitchedPtr((void*) h_beta_z, nx * sizeof(float), nx, ny);
// Copy alpha and beta to texture memory from pitched pointer
cudaMemcpy3DParms copyParams = {0};
copyParams.extent = extent_img;
copyParams.kind = cudaMemcpyHostToDevice;
//alpha_x
copyParams.srcPtr = hp_alpha_x;
copyParams.dstArray = d_alpha_x;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy alpha_x to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//alpha_y
copyParams.srcPtr = hp_alpha_y;
copyParams.dstArray = d_alpha_y;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy alpha_y to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//alpha_z
copyParams.srcPtr = hp_alpha_z;
copyParams.dstArray = d_alpha_z;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy alpha_z to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_x
copyParams.srcPtr = hp_beta_x;
copyParams.dstArray = d_beta_x;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy beta_x to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_y
copyParams.srcPtr = hp_beta_y;
copyParams.dstArray = d_beta_y;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy beta_y to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
//beta_z
copyParams.srcPtr = hp_beta_z;
copyParams.dstArray = d_beta_z;
cudaStat = cudaMemcpy3D(©Params);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy beta_z to device memory.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// create texture object alpha and beta
cudaResourceDesc resDesc;
cudaTextureDesc texDesc, texDesc2;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 0;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.addressMode[0] = cudaAddressModeClamp;
texDesc2.addressMode[1] = cudaAddressModeClamp;
texDesc2.addressMode[2] = cudaAddressModeClamp;
texDesc2.filterMode = cudaFilterModePoint;
texDesc2.readMode = cudaReadModeElementType;
texDesc2.normalizedCoords = 0;
// alpha_x
resDesc.res.array.array = d_alpha_x;
cudaTextureObject_t tex_alpha_x = 0;
cudaCreateTextureObject(&tex_alpha_x, &resDesc, &texDesc, NULL);
// alpha_y
resDesc.res.array.array = d_alpha_y;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = cudaAddressModeClamp;
// texDesc.addressMode[1] = cudaAddressModeClamp;
// texDesc.addressMode[2] = cudaAddressModeClamp;
// texDesc.filterMode = cudaFilterModeLinear;
// texDesc.readMode = cudaReadModeElementType;
// texDesc.normalizedCoords = 0;
cudaTextureObject_t tex_alpha_y = 0;
cudaCreateTextureObject(&tex_alpha_y, &resDesc, &texDesc, NULL);
// alpha_z
resDesc.res.array.array = d_alpha_z;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = cudaAddressModeClamp;
// texDesc.addressMode[1] = cudaAddressModeClamp;
// texDesc.addressMode[2] = cudaAddressModeClamp;
// texDesc.filterMode = cudaFilterModeLinear;
// texDesc.readMode = cudaReadModeElementType;
// texDesc.normalizedCoords = 0;
cudaTextureObject_t tex_alpha_z = 0;
cudaCreateTextureObject(&tex_alpha_z, &resDesc, &texDesc, NULL);
// beta_x
resDesc.res.array.array = d_beta_x;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = cudaAddressModeClamp;
// texDesc.addressMode[1] = cudaAddressModeClamp;
// texDesc.addressMode[2] = cudaAddressModeClamp;
// texDesc.filterMode = cudaFilterModeLinear;
// texDesc.readMode = cudaReadModeElementType;
// texDesc.normalizedCoords = 0;
cudaTextureObject_t tex_beta_x = 0;
cudaCreateTextureObject(&tex_beta_x, &resDesc, &texDesc, NULL);
// beta_y
resDesc.res.array.array = d_beta_y;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = cudaAddressModeClamp;
// texDesc.addressMode[1] = cudaAddressModeClamp;
// texDesc.addressMode[2] = cudaAddressModeClamp;
// texDesc.filterMode = cudaFilterModeLinear;
// texDesc.readMode = cudaReadModeElementType;
// texDesc.normalizedCoords = 0;
cudaTextureObject_t tex_beta_y = 0;
cudaCreateTextureObject(&tex_beta_y, &resDesc, &texDesc, NULL);
// beta_z
resDesc.res.array.array = d_beta_z;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.addressMode[0] = cudaAddressModeClamp;
// texDesc.addressMode[1] = cudaAddressModeClamp;
// texDesc.addressMode[2] = cudaAddressModeClamp;
// texDesc.filterMode = cudaFilterModeLinear;
// texDesc.readMode = cudaReadModeElementType;
// texDesc.normalizedCoords = 0;
cudaTextureObject_t tex_beta_z = 0;
cudaCreateTextureObject(&tex_beta_z, &resDesc, &texDesc, NULL);
// malloc in device: measured projection buffer for a single view (filled from h_proj for each view)
float *d_proj;
cudaMalloc((void**)&d_proj, numBytesSingleProj);
// copy to device: projection of the whole bin
// cudaMemcpy(d_proj, h_proj, numBytesProj, cudaMemcpyHostToDevice);
// malloc in device: another projection pointer, with single view size
float *d_singleViewProj2;
cudaMalloc((void**)&d_singleViewProj2, numBytesSingleProj);
// malloc in device: image volumes, one per bin
float *d_img ,*d_img1;
cudaArray *array_img;
cudaMalloc((void**)&d_img, numBytesImg * n_bin);
cudaMalloc((void**)&d_img1, numBytesImg);
cudaStat = cudaMalloc3DArray(&array_img, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_img failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// malloc in device: another image pointer, for single view
float *d_singleViewImg1, *d_singleViewImg2, *d_imgOnes;
cudaMalloc(&d_singleViewImg1, numBytesImg);
cudaMalloc(&d_singleViewImg2, numBytesImg);
cudaMalloc(&d_imgOnes, numBytesImg);
float angle, volume, flow;
//Malloc forward and inverted DVFs in device
float *d_mx, *d_my, *d_mz, *d_mx2, *d_my2, *d_mz2;
cudaMalloc(&d_mx, numBytesImg);
cudaMalloc(&d_my, numBytesImg);
cudaMalloc(&d_mz, numBytesImg);
cudaMalloc(&d_mx2, numBytesImg);
cudaMalloc(&d_my2, numBytesImg);
cudaMalloc(&d_mz2, numBytesImg);
// Alloc forward and inverted DVFs in device, in form of array memory
cudaArray *array_mx, *array_my, *array_mz, *array_mx2, *array_my2, *array_mz2;
cudaStat = cudaMalloc3DArray(&array_mx, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_mx failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = cudaMalloc3DArray(&array_my, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_my failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = cudaMalloc3DArray(&array_mz, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_mz failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = cudaMalloc3DArray(&array_mx2, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_mx2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = cudaMalloc3DArray(&array_my2, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_my2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
cudaStat = cudaMalloc3DArray(&array_mz2, &channelDesc, extent_img);
if (cudaStat != cudaSuccess) {
mexPrintf("Array memory allocation for array_mz2 failed.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
// define tex_mx etc
cudaTextureObject_t tex_mx = 0, tex_my = 0, tex_mz = 0, tex_mx2 = 0, tex_my2 = 0, tex_mz2 = 0, tex_img = 0;
// setup output images
OUT_IMG = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[4] = {(mwSize)nx, (mwSize)ny, (mwSize)nz, (mwSize)n_bin};
mxSetDimensions(OUT_IMG, outDim, 4);
mxSetData(OUT_IMG, mxMalloc(numBytesImg * n_bin));
float *h_outimg = (float*)mxGetData(OUT_IMG);
OUT_ERR = mxCreateNumericMatrix(n_iter, 1, mxSINGLE_CLASS, mxREAL);
float *h_outerr = (float*)mxGetData(OUT_ERR), temp_err[1];
cublasHandle_t handle;
cublasCreate(&handle); // initialize the cuBLAS handle used by cublasSnrm2_v2 below
copyParams.kind = cudaMemcpyDeviceToDevice;
// for (int i = 0; i < n_iter; i++)
// mexPrintf("=");mexEvalString("drawnow;");
// mexPrintf("\n");mexEvalString("drawnow;");
cudaMemcpy(d_img, h_img, numBytesImg * n_bin, cudaMemcpyHostToDevice);
for (int iter = 0; iter < n_iter; iter++){ // iteration
// mexPrintf("x");mexEvalString("drawnow;");
for (int ibin = 0; ibin < n_bin; ibin++){
processBar(iter, n_iter, ibin, n_bin);
// initial guesses of each bin
// mexPrintf("Dealing with ibin = %d.\n", ibin); mexEvalString("drawnow;");
// if (ibin < 1){
// cudaMemcpy(d_img, h_img, numBytesImg, cudaMemcpyHostToDevice);
// }
// else{
// // cudaMemcpy(d_img1, h_img, numBytesImg, cudaMemcpyHostToDevice);
// volume = ref_volumes[ibin];
// flow = ref_flows[ibin];
// kernel_forwardDVF<<<gridSize_img, blockSize>>>(d_mx, d_my, d_mz, tex_alpha_x, tex_alpha_y, tex_alpha_z, tex_beta_x, tex_beta_y, tex_beta_z, volume, flow, nx, ny, nz);
// cudaDeviceSynchronize();
// // copy img to pitched pointer and bind it to a texture object
// cudaPitchedPtr dp_img = make_cudaPitchedPtr((void*) d_img1, nx * sizeof(float), nx, ny);
// copyParams.srcPtr = dp_img;
// copyParams.dstArray = array_img;
// cudaStat = cudaMemcpy3D(&copyParams);
// if (cudaStat != cudaSuccess) {
// mexPrintf("Failed to copy dp_img to array memory array_img.\n");
// mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
// mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
// }
// resDesc.res.array.array = array_img;
// cudaCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL);
// kernel_deformation<<<gridSize_img, blockSize>>>(d_img, tex_img, d_mx, d_my, d_mz, nx, ny, nz);
// cudaDeviceSynchronize();
// }
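// Per-view SART step: build the DVF from the bin's reference motion state to this view's state,
// deform the bin image to the view, forward-project it, form the residual against the measured
// projection, backproject and deform the residual back to the bin, normalize by the backprojection
// of a ones image, and update the bin image.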
for (int i_view = n_views[ibin]; i_view < n_views[ibin + 1]; i_view++){ // view
angle = angles[i_view];
volume = ref_volumes[ibin] - volumes[i_view];
flow = ref_flows[ibin] - flows[i_view];
// generate forwards DVFs: d_mx, d_my, d_mz and inverted DVFs: d_mx2, d_my2, d_mz2
kernel_forwardDVF<<<gridSize_img, blockSize>>>(d_mx, d_my, d_mz, tex_alpha_x, tex_alpha_y, tex_alpha_z, tex_beta_x, tex_beta_y, tex_beta_z, volume, flow, nx, ny, nz);
cudaDeviceSynchronize();
// copy mx etc to pitched pointer and bind it to a texture object
cudaPitchedPtr dp_mx = make_cudaPitchedPtr((void*) d_mx, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mx;
copyParams.dstArray = array_mx;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_mx to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mx;
cudaCreateTextureObject(&tex_mx, &resDesc, &texDesc, NULL);
cudaPitchedPtr dp_my = make_cudaPitchedPtr((void*) d_my, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_my;
copyParams.dstArray = array_my;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_my to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_my;
cudaCreateTextureObject(&tex_my, &resDesc, &texDesc, NULL);
cudaPitchedPtr dp_mz = make_cudaPitchedPtr((void*) d_mz, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mz;
copyParams.dstArray = array_mz;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_mz to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mz;
cudaCreateTextureObject(&tex_mz, &resDesc, &texDesc, NULL);
kernel_invertDVF<<<gridSize_img, blockSize>>>(d_mx2, d_my2, d_mz2, tex_mx, tex_my, tex_mz, nx, ny, nz, n_iter_invertDVF);
cudaDeviceSynchronize();
// copy mx2 etc to pitched pointer and bind it to a texture object
cudaPitchedPtr dp_mx2 = make_cudaPitchedPtr((void*) d_mx2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mx2;
copyParams.dstArray = array_mx2;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_mx2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mx2;
cudaCreateTextureObject(&tex_mx2, &resDesc, &texDesc, NULL);
cudaPitchedPtr dp_my2 = make_cudaPitchedPtr((void*) d_my2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_my2;
copyParams.dstArray = array_my2;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_my2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_my2;
cudaCreateTextureObject(&tex_my2, &resDesc, &texDesc, NULL);
cudaPitchedPtr dp_mz2 = make_cudaPitchedPtr((void*) d_mz2, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_mz2;
copyParams.dstArray = array_mz2;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_mz2 to array memory array_mx2.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_mz2;
cudaCreateTextureObject(&tex_mz2, &resDesc, &texDesc, NULL);
// copy img to pitched pointer and bind it to a texture object
cudaPitchedPtr dp_img = make_cudaPitchedPtr((void*) d_img + ibin * numBytesImg, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_img;
copyParams.dstArray = array_img;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_img to array memory array_img.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_img;
cudaCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL);
// deformed image for i_view, from reference image of the bin
kernel_deformation<<<gridSize_img, blockSize>>>(d_singleViewImg1, tex_img, d_mx2, d_my2, d_mz2, nx, ny, nz);
cudaDeviceSynchronize();
// projection of deformed image from initial guess
kernel_projection<<<gridSize_singleProj, blockSize>>>(d_singleViewProj2, d_singleViewImg1, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz); // TBD
cudaDeviceSynchronize();
// difference between true projection and projection from initial guess
// update d_singleViewProj2 instead of malloc a new one
cudaMemcpy(d_proj, h_proj + i_view * numSingleProj, numBytesSingleProj, cudaMemcpyHostToDevice);
// mexPrintf("i_view = %d.\n", i_view);mexEvalString("drawnow;");
kernel_add<<<gridSize_singleProj, blockSize>>>(d_singleViewProj2, d_proj, 0, na, nb, -1);
cudaDeviceSynchronize();
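// accumulate the L2 norm of the projection residual as this iteration's error measure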
cublasSnrm2_v2(handle, na * nb, d_singleViewProj2, 1, temp_err);
h_outerr[iter] += temp_err[0];
// backprojecting the difference of projections
// print parameters
kernel_backprojection(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// copy img to pitched pointer and bind it to a texture object
dp_img = make_cudaPitchedPtr((void*) d_singleViewImg1, nx * sizeof(float), nx, ny);
copyParams.srcPtr = dp_img;
copyParams.dstArray = array_img;
cudaStat = cudaMemcpy3D(&copyParams);
if (cudaStat != cudaSuccess) {
mexPrintf("Failed to copy dp_img to array memory array_img.\n");
mexPrintf("Error code %d: %s\n",cudaStat,cudaGetErrorString(cudaStat));
mexErrMsgIdAndTxt("MATLAB:cudaFail","SART failed.\n");
}
resDesc.res.array.array = array_img;
cudaCreateTextureObject(&tex_img, &resDesc, &texDesc2, NULL);
kernel_invertDVF<<<gridSize_img, blockSize>>>(d_mx, d_my, d_mz, tex_mx2, tex_my2, tex_mz2, nx, ny, nz, n_iter_invertDVF);
cudaDeviceSynchronize();
// deform backprojection back to the bin
kernel_deformation<<<gridSize_img, blockSize>>>(d_singleViewImg2, tex_img, d_mx, d_my, d_mz, nx, ny, nz);
cudaDeviceSynchronize();
// mexPrintf("8");mexEvalString("drawnow;");
// calculate the ones backprojection data
kernel_initial<<<gridSize_img, blockSize>>>(d_singleViewImg1, nx, ny, nz, 1);
cudaDeviceSynchronize();
// mexPrintf("9");mexEvalString("drawnow;");
kernel_projection<<<gridSize_singleProj, blockSize>>>(d_singleViewProj2, d_singleViewImg1, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
cudaDeviceSynchronize();
// mexPrintf("10");mexEvalString("drawnow;");
// kernel_backprojection<<<gridSize_img, blockSize>>>(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// cudaDeviceSynchronize();
kernel_backprojection(d_singleViewImg1, d_singleViewProj2, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
// mexPrintf("11");mexEvalString("drawnow;");
// weighting
kernel_division<<<gridSize_img, blockSize>>>(d_singleViewImg2, d_singleViewImg1, nx, ny, nz);
cudaDeviceSynchronize();
// mexPrintf("12");mexEvalString("drawnow;");
// updating
kernel_update<<<gridSize_img, blockSize>>>(d_img + ibin * numImg, d_singleViewImg2, nx, ny, nz, lambda);
cudaDeviceSynchronize();
// mexPrintf("13");mexEvalString("drawnow;");
}
// if (ibin == 0){
// cudaMemcpy(d_img1, d_img, numBytesImg, cudaMemcpyDeviceToDevice);
// }
}
// mexPrintf("\n");mexEvalString("drawnow;");
}
cudaMemcpy(h_outimg, d_img, numBytesImg * n_bin, cudaMemcpyDeviceToHost);
// const mwSize *outDim = mxGetDimensions(PROJ); // IN_IMG or PROJ
// mxSetDimensions(OUT_IMG, outDim, 3);
// mxSetData(OUT_IMG, mxMalloc(numBytesImg));
// float *h_outimg = (float*)mxGetData(OUT_IMG);
// cudaMemcpy(h_outimg, d_singleViewProj2, numBytesSingleProj, cudaMemcpyDeviceToHost);
// const mwSize *outDim = mxGetDimensions(IN_IMG); // IN_IMG or PROJ
// mxSetDimensions(OUT_IMG, outDim, 3);
// mxSetData(OUT_IMG, mxMalloc(numBytesImg));
// float *h_outimg = (float*)mxGetData(OUT_IMG);
// cudaMemcpy(h_outimg, d_singleViewImg1, numBytesImg, cudaMemcpyDeviceToHost);
cudaDestroyTextureObject(tex_alpha_x);
cudaDestroyTextureObject(tex_alpha_y);
cudaDestroyTextureObject(tex_alpha_z);
cudaDestroyTextureObject(tex_beta_x);
cudaDestroyTextureObject(tex_beta_y);
cudaDestroyTextureObject(tex_beta_z);
cudaDestroyTextureObject(tex_img);
cudaDestroyTextureObject(tex_mx);
cudaDestroyTextureObject(tex_my);
cudaDestroyTextureObject(tex_mz);
cudaDestroyTextureObject(tex_mx2);
cudaDestroyTextureObject(tex_my2);
cudaDestroyTextureObject(tex_mz2);
cudaFreeArray(d_alpha_x);
cudaFreeArray(d_alpha_y);
cudaFreeArray(d_alpha_z);
cudaFreeArray(d_beta_x);
cudaFreeArray(d_beta_y);
cudaFreeArray(d_beta_z);
cudaFreeArray(array_img);
cudaFree(d_mx);
cudaFree(d_my);
cudaFree(d_mz);
cudaFree(d_mx2);
cudaFree(d_my2);
cudaFree(d_mz2);
cudaFreeArray(array_mx);
cudaFreeArray(array_my);
cudaFreeArray(array_mz);
cudaFreeArray(array_mx2);
cudaFreeArray(array_my2);
cudaFreeArray(array_mz2);
cudaFree(d_proj);
cudaFree(d_singleViewImg1);
cudaFree(d_singleViewImg2);
cudaFree(d_singleViewProj2);
cudaFree(d_img);
cudaFree(d_img1);
cublasDestroy(handle);
cudaDeviceReset();
return;
}
|
b65b4ceadb46ed639360a2e2d05e42a2a54d1f6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
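// 4-component index decoded from the flat thread id in the kernels below
// (a 20 x 500 x 500 x 2 grid covering the 10,000,000 elements)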
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_1(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_3(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_5 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_5(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_4 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_3 *input_0;
array_command_5 *input_1;
__host__ __device__ array_command_4(int *result = NULL, array_command_3 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { }
};
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_1 = ((indices.field_1 % 4));
(_temp_var_1 == 0 ? indices.field_0 : (_temp_var_1 == 1 ? indices.field_1 : (_temp_var_1 == 2 ? indices.field_2 : (_temp_var_1 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_1(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_2 = ((indices.field_1 % 4));
(_temp_var_2 == 0 ? indices.field_0 : (_temp_var_2 == 1 ? indices.field_1 : (_temp_var_2 == 2 ? indices.field_2 : (_temp_var_2 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_3(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_5(environment_t *_env_, int _num_threads_, int *_result_, int *_array_7_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_7_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_5 = ((({ int _temp_var_6 = ((({ int _temp_var_7 = ((i % 4));
(_temp_var_7 == 0 ? indices.field_0 : (_temp_var_7 == 1 ? indices.field_1 : (_temp_var_7 == 2 ? indices.field_2 : (_temp_var_7 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_6 == 0 ? indices.field_0 : (_temp_var_6 == 1 ? indices.field_1 : (_temp_var_6 == 2 ? indices.field_2 : (_temp_var_6 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_5 == 0 ? indices.field_0 : (_temp_var_5 == 1 ? indices.field_1 : (_temp_var_5 == 2 ? indices.field_2 : (_temp_var_5 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_8(environment_t *_env_, int _num_threads_, int *_result_, int *_array_10_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_10_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_11(environment_t *_env_, int _num_threads_, int *_result_, int *_array_13_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_13_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_10 = ((({ int _temp_var_11 = ((({ int _temp_var_12 = ((i % 4));
(_temp_var_12 == 0 ? indices.field_0 : (_temp_var_12 == 1 ? indices.field_1 : (_temp_var_12 == 2 ? indices.field_2 : (_temp_var_12 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_11 == 0 ? indices.field_0 : (_temp_var_11 == 1 ? indices.field_1 : (_temp_var_11 == 2 ? indices.field_2 : (_temp_var_11 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_10 == 0 ? indices.field_0 : (_temp_var_10 == 1 ? indices.field_1 : (_temp_var_10 == 2 ? indices.field_2 : (_temp_var_10 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_14(environment_t *_env_, int _num_threads_, int *_result_, int *_array_16_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_16_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_17(environment_t *_env_, int _num_threads_, int *_result_, int *_array_19_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_19_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_15 = ((({ int _temp_var_16 = ((({ int _temp_var_17 = ((i % 4));
(_temp_var_17 == 0 ? indices.field_0 : (_temp_var_17 == 1 ? indices.field_1 : (_temp_var_17 == 2 ? indices.field_2 : (_temp_var_17 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_16 == 0 ? indices.field_0 : (_temp_var_16 == 1 ? indices.field_1 : (_temp_var_16 == 2 ? indices.field_2 : (_temp_var_16 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_15 == 0 ? indices.field_0 : (_temp_var_15 == 1 ? indices.field_1 : (_temp_var_15 == 2 ? indices.field_2 : (_temp_var_15 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_20(environment_t *_env_, int _num_threads_, int *_result_, int *_array_22_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_22_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_23(environment_t *_env_, int _num_threads_, int *_result_, int *_array_25_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_25_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_20 = ((({ int _temp_var_21 = ((({ int _temp_var_22 = ((i % 4));
(_temp_var_22 == 0 ? indices.field_0 : (_temp_var_22 == 1 ? indices.field_1 : (_temp_var_22 == 2 ? indices.field_2 : (_temp_var_22 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_21 == 0 ? indices.field_0 : (_temp_var_21 == 1 ? indices.field_1 : (_temp_var_21 == 2 ? indices.field_2 : (_temp_var_21 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_20 == 0 ? indices.field_0 : (_temp_var_20 == 1 ? indices.field_1 : (_temp_var_20 == 2 ? indices.field_2 : (_temp_var_20 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_26(environment_t *_env_, int _num_threads_, int *_result_, int *_array_28_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_28_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_29(environment_t *_env_, int _num_threads_, int *_result_, int *_array_31_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_31_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_27 = ((({ int _temp_var_28 = ((({ int _temp_var_29 = ((i % 4));
(_temp_var_29 == 0 ? indices.field_0 : (_temp_var_29 == 1 ? indices.field_1 : (_temp_var_29 == 2 ? indices.field_2 : (_temp_var_29 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_28 == 0 ? indices.field_0 : (_temp_var_28 == 1 ? indices.field_1 : (_temp_var_28 == 2 ? indices.field_2 : (_temp_var_28 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_27 == 0 ? indices.field_0 : (_temp_var_27 == 1 ? indices.field_1 : (_temp_var_27 == 2 ? indices.field_2 : (_temp_var_27 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_32(environment_t *_env_, int _num_threads_, int *_result_, int *_array_34_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_34_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return variable_size_array_t::error_return_value;\
}
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * x = new array_command_2();
int r;
union_t _ssa_var_old_data_2;
array_command_4 * _ssa_var_y_4;
union_t _ssa_var_old_data_3;
union_t _ssa_var_y_1;
{
_ssa_var_y_1 = union_t(10, union_v_t::from_pointer((void *) new array_command_3(NULL, ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]
array_command_2 * cmd = x;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_2;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_2, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_2);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_1), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_2);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_2;
}
variable_size_array_t((void *) cmd->result, 10000000);
}))));
_ssa_var_old_data_2 = _ssa_var_y_1;
for (r = 0; r <= (500 - 1); r++)
{
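// Each pass materializes the previous command into a device buffer (kernel_5 for a plain array,
// kernel_8 for a combine command), frees the previous command's result buffer, and rebinds
// _ssa_var_y_1 to a new array_command_4 built on top of that buffer.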
_ssa_var_old_data_3 = _ssa_var_y_1;
_ssa_var_y_4 = new array_command_4(NULL, new array_command_3(NULL, ({
variable_size_array_t _polytemp_result_3;
{
union_t _polytemp_expr_4 = _ssa_var_y_1;
switch (_polytemp_expr_4.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_3 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_4.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_6;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_6, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_6);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_5), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_6, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_6;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_3 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pmap([HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_4.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_9;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_9, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_9);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_8), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_9, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_9;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_3;
})));
({
bool _polytemp_result_23;
{
union_t _polytemp_expr_24 = _ssa_var_old_data_3;
switch (_polytemp_expr_24.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_23 = ({
array_command_3 * cmd_to_free = (array_command_3 *) _polytemp_expr_24.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, hipFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_23 = ({
array_command_4 * cmd_to_free = (array_command_4 *) _polytemp_expr_24.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, hipFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
}
}
_polytemp_result_23;
});
_ssa_var_y_1 = union_t(11, union_v_t::from_pointer((void *) _ssa_var_y_4));
_ssa_var_old_data_2 = _ssa_var_old_data_3;
}
r--;
return ({
variable_size_array_t _polytemp_result_25;
{
union_t _polytemp_expr_26 = _ssa_var_y_1;
switch (_polytemp_expr_26.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_25 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_26.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_30;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_30, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_30);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_29), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_30, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_30;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_25 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pmap([HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_26.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_33;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_33, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_33);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_32), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_33, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_33;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_25;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
expr
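// from here on, checkErrorReturn simply evaluates the expression; the error-recording
// and early-return behavior above applies only inside _host_section__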
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, hipFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, hipMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(dev_env, host_env, sizeof(environment_t), hipMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, hipMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, hipFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
| b65b4ceadb46ed639360a2e2d05e42a2a54d1f6a.cu | #include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
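// 4-component index decoded from the flat thread id in the kernels below
// (a 20 x 500 x 500 x 2 grid covering the 10,000,000 elements)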
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_1(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_3(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_5 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_5(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_4 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_3 *input_0;
array_command_5 *input_1;
__host__ __device__ array_command_4(int *result = NULL, array_command_3 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { }
};
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_1 = ((indices.field_1 % 4));
(_temp_var_1 == 0 ? indices.field_0 : (_temp_var_1 == 1 ? indices.field_1 : (_temp_var_1 == 2 ? indices.field_2 : (_temp_var_1 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_1(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_2 = ((indices.field_1 % 4));
(_temp_var_2 == 0 ? indices.field_0 : (_temp_var_2 == 1 ? indices.field_1 : (_temp_var_2 == 2 ? indices.field_2 : (_temp_var_2 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_3(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_5(environment_t *_env_, int _num_threads_, int *_result_, int *_array_7_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_7_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_5 = ((({ int _temp_var_6 = ((({ int _temp_var_7 = ((i % 4));
(_temp_var_7 == 0 ? indices.field_0 : (_temp_var_7 == 1 ? indices.field_1 : (_temp_var_7 == 2 ? indices.field_2 : (_temp_var_7 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_6 == 0 ? indices.field_0 : (_temp_var_6 == 1 ? indices.field_1 : (_temp_var_6 == 2 ? indices.field_2 : (_temp_var_6 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_5 == 0 ? indices.field_0 : (_temp_var_5 == 1 ? indices.field_1 : (_temp_var_5 == 2 ? indices.field_2 : (_temp_var_5 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_8(environment_t *_env_, int _num_threads_, int *_result_, int *_array_10_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_10_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_11(environment_t *_env_, int _num_threads_, int *_result_, int *_array_13_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_13_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_10 = ((({ int _temp_var_11 = ((({ int _temp_var_12 = ((i % 4));
(_temp_var_12 == 0 ? indices.field_0 : (_temp_var_12 == 1 ? indices.field_1 : (_temp_var_12 == 2 ? indices.field_2 : (_temp_var_12 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_11 == 0 ? indices.field_0 : (_temp_var_11 == 1 ? indices.field_1 : (_temp_var_11 == 2 ? indices.field_2 : (_temp_var_11 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_10 == 0 ? indices.field_0 : (_temp_var_10 == 1 ? indices.field_1 : (_temp_var_10 == 2 ? indices.field_2 : (_temp_var_10 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_14(environment_t *_env_, int _num_threads_, int *_result_, int *_array_16_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_16_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_17(environment_t *_env_, int _num_threads_, int *_result_, int *_array_19_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_19_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_15 = ((({ int _temp_var_16 = ((({ int _temp_var_17 = ((i % 4));
(_temp_var_17 == 0 ? indices.field_0 : (_temp_var_17 == 1 ? indices.field_1 : (_temp_var_17 == 2 ? indices.field_2 : (_temp_var_17 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_16 == 0 ? indices.field_0 : (_temp_var_16 == 1 ? indices.field_1 : (_temp_var_16 == 2 ? indices.field_2 : (_temp_var_16 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_15 == 0 ? indices.field_0 : (_temp_var_15 == 1 ? indices.field_1 : (_temp_var_15 == 2 ? indices.field_2 : (_temp_var_15 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_20(environment_t *_env_, int _num_threads_, int *_result_, int *_array_22_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_22_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_23(environment_t *_env_, int _num_threads_, int *_result_, int *_array_25_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_25_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_20 = ((({ int _temp_var_21 = ((({ int _temp_var_22 = ((i % 4));
(_temp_var_22 == 0 ? indices.field_0 : (_temp_var_22 == 1 ? indices.field_1 : (_temp_var_22 == 2 ? indices.field_2 : (_temp_var_22 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_21 == 0 ? indices.field_0 : (_temp_var_21 == 1 ? indices.field_1 : (_temp_var_21 == 2 ? indices.field_2 : (_temp_var_21 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_20 == 0 ? indices.field_0 : (_temp_var_20 == 1 ? indices.field_1 : (_temp_var_20 == 2 ? indices.field_2 : (_temp_var_20 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_26(environment_t *_env_, int _num_threads_, int *_result_, int *_array_28_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_28_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_29(environment_t *_env_, int _num_threads_, int *_result_, int *_array_31_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_31_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((((((i % 938)) + ((i / 97)))) % 97717)) + ((((({ int _temp_var_27 = ((({ int _temp_var_28 = ((({ int _temp_var_29 = ((i % 4));
(_temp_var_29 == 0 ? indices.field_0 : (_temp_var_29 == 1 ? indices.field_1 : (_temp_var_29 == 2 ? indices.field_2 : (_temp_var_29 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_28 == 0 ? indices.field_0 : (_temp_var_28 == 1 ? indices.field_1 : (_temp_var_28 == 2 ? indices.field_2 : (_temp_var_28 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_27 == 0 ? indices.field_0 : (_temp_var_27 == 1 ? indices.field_1 : (_temp_var_27 == 2 ? indices.field_2 : (_temp_var_27 == 3 ? indices.field_3 : NULL)))); }) * ((i % 7)))) % 99)));
}
}
#endif
__global__ void kernel_32(environment_t *_env_, int _num_threads_, int *_result_, int *_array_34_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_4_(_env_, _array_34_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return variable_size_array_t::error_return_value;\
}
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * x = new array_command_2();
int r;
union_t _ssa_var_old_data_2;
array_command_4 * _ssa_var_y_4;
union_t _ssa_var_old_data_3;
union_t _ssa_var_y_1;
{
_ssa_var_y_1 = union_t(10, union_v_t::from_pointer((void *) new array_command_3(NULL, ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]
array_command_2 * cmd = x;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_2;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_2, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_2);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_1<<<39063, 256>>>(dev_env, 10000000, _kernel_result_2);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_2;
}
variable_size_array_t((void *) cmd->result, 10000000);
}))));
_ssa_var_old_data_2 = _ssa_var_y_1;
for (r = 0; r <= (500 - 1); r++)
{
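// Each pass materializes the previous command into a device buffer (kernel_5 for a plain array,
// kernel_8 for a combine command), frees the previous command's result buffer, and rebinds
// _ssa_var_y_1 to a new array_command_4 built on top of that buffer.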
_ssa_var_old_data_3 = _ssa_var_y_1;
_ssa_var_y_4 = new array_command_4(NULL, new array_command_3(NULL, ({
variable_size_array_t _polytemp_result_3;
{
union_t _polytemp_expr_4 = _ssa_var_y_1;
switch (_polytemp_expr_4.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_3 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_4.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_6;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_6, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_6);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_5<<<39063, 256>>>(dev_env, 10000000, _kernel_result_6, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_6;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_3 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pmap([HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_4.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_9;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_9, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_9);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_8<<<39063, 256>>>(dev_env, 10000000, _kernel_result_9, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_9;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_3;
})));
({
bool _polytemp_result_23;
{
union_t _polytemp_expr_24 = _ssa_var_old_data_3;
switch (_polytemp_expr_24.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_23 = ({
array_command_3 * cmd_to_free = (array_command_3 *) _polytemp_expr_24.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, cudaFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_23 = ({
array_command_4 * cmd_to_free = (array_command_4 *) _polytemp_expr_24.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, cudaFree(cmd_to_free->result));
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
}
}
_polytemp_result_23;
});
_ssa_var_y_1 = union_t(11, union_v_t::from_pointer((void *) _ssa_var_y_4));
_ssa_var_old_data_2 = _ssa_var_old_data_3;
}
r--;
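// Force evaluation of the final command and hand its device buffer back to
// launch_kernel, which copies it to the host.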
return ({
variable_size_array_t _polytemp_result_25;
{
union_t _polytemp_expr_26 = _ssa_var_y_1;
switch (_polytemp_expr_26.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_25 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_26.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_30;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_30, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_30);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_29<<<39063, 256>>>(dev_env, 10000000, _kernel_result_30, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_30;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 11: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_25 = ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pmap([HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_26.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_33;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_33, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_33);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_32<<<39063, 256>>>(dev_env, 10000000, _kernel_result_33, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_33;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_25;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
expr
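// From this point on, checkErrorReturn only evaluates the wrapped expression; CUDA
// return codes inside launch_kernel are not checked except where done explicitly
// (e.g. the cudaSetDevice test below).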
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, cudaFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, cudaFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
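// A minimal sketch of how a host program might call this entry point. It assumes the
// environment_t and result_t definitions generated earlier in this file; how host_env
// has to be populated depends on the Ikra program that produced this code, so the
// zero-initialization below is only a placeholder, and treating last_error == 0 as
// success is an assumption based on the -1 assigned above on failure.
//
//   environment_t host_env = {};                  // program-specific inputs go here
//   result_t *res = launch_kernel(&host_env);
//   if (res != NULL && res->last_error == 0) {
//       int *data = (int *) res->result.content;  // host copy made by launch_kernel
//       for (int i = 0; i < 10; i++) {
//           printf("%d\n", data[i]);
//       }
//       free(data);                               // allocated with malloc above
//   }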