hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
052500f0b44b38f3e60c6cf6c86752678e86916b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Host code for vector-matrix multiplication: Y = A * X.
* A is a n x m matrix, X is a m x 1 vector and Y is the n x 1 result vector.
*
* Build and run as follows:
* make clean && make
* ./vec_mat_mult num-rows num-columns
* Author: Naga Kandasamy
* Date modified: May 6, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
/* Include kernel code here */
#include "vec_mat_mult_kernel.cu"
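/* Editorial note: vec_mat_mult_kernel.cu is not reproduced in this listing.
   Based on the launch configurations used in compute_on_device() below
   (threads indexed along y, one thread per output row), the naive kernel is
   assumed to look roughly like the sketch in this comment; THREAD_BLOCK_SIZE
   and TILE_SIZE are assumed to be defined in that kernel file.

   __global__ void multiply_kernel_naive(float *A, float *X, float *Y,
                                         int num_rows, int num_columns)
   {
       int row = blockIdx.y * blockDim.y + threadIdx.y;  // one thread per row of A
       if (row < num_rows) {
           float sum = 0.0f;
           for (int j = 0; j < num_columns; j++)
               sum += A[row * num_columns + j] * X[j];   // dot product of row of A with X
           Y[row] = sum;
       }
   }

   The optimized variant is expected to tile the same dot product through
   shared memory so that consecutive threads read consecutive elements of A. */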
extern "C" void compute_gold(matrix_t, matrix_t, matrix_t);
matrix_t allocate_matrix_on_device(matrix_t);
matrix_t allocate_matrix_on_host(int, int, int);
void copy_matrix_to_device(matrix_t, matrix_t);
void copy_matrix_from_device(matrix_t, matrix_t);
void compute_on_device(matrix_t, matrix_t, matrix_t);
void print_matrix(matrix_t);
float get_random_number(int, int);
void check_CUDA_error(const char *);
int check_results(float *, float *, int, float);
int main(int argc, char **argv)
{
if (argc < 3) {
fprintf(stderr, "Usage: %s num-rows num-columns\n", argv[0]);
fprintf(stderr, "num-rows: Height of the matrix\n");
fprintf(stderr, "num-columns: Width of the matrix\n");
exit(EXIT_FAILURE);
}
int num_rows = atoi(argv[1]);
int num_columns = atoi(argv[2]);
/* Allocate and initialize the matrices on the host */
matrix_t A; /* n x m matrix */
matrix_t X; /* m x 1 vector */
/* Initialize random number generator */
srand(time(NULL));
fprintf(stderr, "Creating the %d x %d matrix\n", num_rows, num_columns);
A = allocate_matrix_on_host(num_rows, num_columns, 1);
if (A.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "Creating the %d x 1 vector\n", num_columns);
X = allocate_matrix_on_host(num_columns, 1, 1);
if (X.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
/* Compute vector-matrix multiplication on the CPU */
matrix_t Y_ref = allocate_matrix_on_host(num_rows, 1, 0);
if (Y_ref.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "\nComputing vector-matrix multiplication on CPU\n");
compute_gold(A, X, Y_ref);
/* Perform vector-matrix multiplication on the GPU */
matrix_t Y_device = allocate_matrix_on_host(num_rows, 1, 0);
if (Y_device.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "\nComputing vector-matrix multiplication on GPU\n");
compute_on_device(A, X, Y_device);
/* Check device result against reference */
float eps = 1e-6;
int check = check_results(Y_ref.elements, Y_device.elements, Y_ref.num_rows, eps);
if (check == 1)
fprintf(stderr, "TEST PASSED\n");
else
fprintf(stderr, "TEST FAILED\n");
free((void *)A.elements);
free((void *)X.elements);
free((void *)Y_ref.elements);
free((void *)Y_device.elements);
exit(EXIT_SUCCESS);
}
/* Perform multiplication on device */
void compute_on_device(matrix_t A, matrix_t X, matrix_t Y)
{
/* Load matrices A and X on to device */
matrix_t Ad = allocate_matrix_on_device(A);
copy_matrix_to_device(Ad, A);
matrix_t Xd = allocate_matrix_on_device(X);
copy_matrix_to_device(Xd, X);
/* Allocate Y on device */
matrix_t Yd = allocate_matrix_on_device(Y);
struct timeval start, stop;
fprintf(stderr, "\nUsing naive version when matrix A is stored in row major form on device\n");
gettimeofday(&start, NULL);
/* Set up execution configuration for the naive kernel and launch it */
dim3 threads(1, THREAD_BLOCK_SIZE, 1);
dim3 grid(1, (Yd.num_rows + THREAD_BLOCK_SIZE - 1)/THREAD_BLOCK_SIZE);
hipLaunchKernelGGL(( multiply_kernel_naive), dim3(grid), dim3(threads), 0, 0, Ad.elements, Xd.elements, Yd.elements, Ad.num_rows, Ad.num_columns);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
check_CUDA_error("Error in kernel");
/* Set up execution configuration for optimized kernel that uses shared memory.
Memory accesses made by threads are coalesced.
*/
fprintf(stderr, "\nUsing optimized version when matrix A is stored in row major form on device\n");
gettimeofday(&start, NULL);
threads.x = threads.y = TILE_SIZE;
grid.x = 1;
grid.y = (Yd.num_rows + TILE_SIZE - 1)/TILE_SIZE;
hipLaunchKernelGGL(( multiply_kernel_optimized), dim3(grid), dim3(threads) , 0, 0, Ad.elements, Xd.elements, Yd.elements, Ad.num_rows, Ad.num_columns);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
check_CUDA_error("Error in kernel");
/* Copy result from the device */
copy_matrix_from_device(Y, Yd);
/* Free memory on device */
hipFree(Ad.elements);
hipFree(Xd.elements);
hipFree(Yd.elements);
}
/* Allocate memory for matrix on device */
matrix_t allocate_matrix_on_device(matrix_t M)
{
matrix_t Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void **)&Mdevice.elements, size);
return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization with values between [-0.5, +0.5].
*/
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
if (M.elements == NULL)
return M;
int i;
for (i = 0; i < size; i++) {
if(init == 0)
M.elements[i] = 0.0;
else
M.elements[i] = rand()/(float)RAND_MAX - 0.5;
}
return M;
}
/* Copy matrix from host memory to device memory */
void copy_matrix_to_device(matrix_t Mdevice, matrix_t Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
/* Copy matrix from device memory to host memory */
void copy_matrix_from_device(matrix_t Mhost, matrix_t Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
/* Prints matrix out to screen */
void print_matrix(matrix_t M)
{
int i, j;
for (i = 0; i < M.num_rows; i++) {
for (j = 0; j < M.num_columns; j++) {
fprintf(stderr, "%f ", M.elements[i * M.num_columns + j]);
}
fprintf(stderr, "\n");
}
printf("\n");
}
void check_CUDA_error(const char *msg)
{
hipError_t err = hipGetLastError();
if ( hipSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Compare reference and device results */
int check_results(float *val1, float *val2, int num_elements, float eps)
{
float max_re = 0.0;
float re = 0.0;
int i;
for (i = 0; i < num_elements; i++) {
re = fabsf((val1[i] - val2[i])/val1[i]);
if (re > max_re)
max_re = re;
}
fprintf(stderr, "Max relative error = %f\n", max_re);
if (max_re <= eps)
return 1;
else
return 0;
}
| 052500f0b44b38f3e60c6cf6c86752678e86916b.cu | /* Host code for vector-matrix multiplication: Y = A * X.
* A is a n x m matrix, X is a m x 1 vector and Y is the n x 1 result vector.
*
* Build and run as follows:
* make clean && make
* ./vec_mat_mult num-rows num-columns
* Author: Naga Kandasamy
* Date modified: May 6, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
/* Include kernel code here */
#include "vec_mat_mult_kernel.cu"
extern "C" void compute_gold(matrix_t, matrix_t, matrix_t);
matrix_t allocate_matrix_on_device(matrix_t);
matrix_t allocate_matrix_on_host(int, int, int);
void copy_matrix_to_device(matrix_t, matrix_t);
void copy_matrix_from_device(matrix_t, matrix_t);
void compute_on_device(matrix_t, matrix_t, matrix_t);
void print_matrix(matrix_t);
float get_random_number(int, int);
void check_CUDA_error(const char *);
int check_results(float *, float *, int, float);
int main(int argc, char **argv)
{
if (argc < 3) {
fprintf(stderr, "Usage: %s num-rows num-columns\n", argv[0]);
fprintf(stderr, "num-rows: Height of the matrix\n");
fprintf(stderr, "num-columns: Width of the matrix\n");
exit(EXIT_FAILURE);
}
int num_rows = atoi(argv[1]);
int num_columns = atoi(argv[2]);
/* Allocate and initialize the matrices on the host */
matrix_t A; /* n x m matrix */
matrix_t X; /* m x 1 vector */
/* Initialize random number generator */
srand(time(NULL));
fprintf(stderr, "Creating the %d x %d matrix\n", num_rows, num_columns);
A = allocate_matrix_on_host(num_rows, num_columns, 1);
if (A.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "Creating the %d x 1 vector\n", num_columns);
X = allocate_matrix_on_host(num_columns, 1, 1);
if (X.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
/* Compute vector-matrix multiplication on the CPU */
matrix_t Y_ref = allocate_matrix_on_host(num_rows, 1, 0);
if (Y_ref.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "\nComputing vector-matrix multiplication on CPU\n");
compute_gold(A, X, Y_ref);
/* Perform vector-matrix multiplication on the GPU */
matrix_t Y_device = allocate_matrix_on_host(num_rows, 1, 0);
if (Y_device.elements == NULL) {
perror("Malloc");
exit(EXIT_FAILURE);
}
fprintf(stderr, "\nComputing vector-matrix multiplication on GPU\n");
compute_on_device(A, X, Y_device);
/* Check device result against reference */
float eps = 1e-6;
int check = check_results(Y_ref.elements, Y_device.elements, Y_ref.num_rows, eps);
if (check == 1)
fprintf(stderr, "TEST PASSED\n");
else
fprintf(stderr, "TEST FAILED\n");
free((void *)A.elements);
free((void *)X.elements);
free((void *)Y_ref.elements);
free((void *)Y_device.elements);
exit(EXIT_SUCCESS);
}
/* Perform multiplication on device */
void compute_on_device(matrix_t A, matrix_t X, matrix_t Y)
{
/* Load matrices A and X on to device */
matrix_t Ad = allocate_matrix_on_device(A);
copy_matrix_to_device(Ad, A);
matrix_t Xd = allocate_matrix_on_device(X);
copy_matrix_to_device(Xd, X);
/* Allocate Y on device */
matrix_t Yd = allocate_matrix_on_device(Y);
struct timeval start, stop;
fprintf(stderr, "\nUsing naive version when matrix A is stored in row major form on device\n");
gettimeofday(&start, NULL);
/* Set up execution configuration for the naive kernel and launch it */
dim3 threads(1, THREAD_BLOCK_SIZE, 1);
dim3 grid(1, (Yd.num_rows + THREAD_BLOCK_SIZE - 1)/THREAD_BLOCK_SIZE);
multiply_kernel_naive<<<grid, threads>>>(Ad.elements, Xd.elements, Yd.elements, Ad.num_rows, Ad.num_columns);
cudaDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
check_CUDA_error("Error in kernel");
/* Set up execution configuration for optimized kernel that uses shared memory.
Memory accesses made by threads are coalesced.
*/
fprintf(stderr, "\nUsing optimized version when matrix A is stored in row major form on device\n");
gettimeofday(&start, NULL);
threads.x = threads.y = TILE_SIZE;
grid.x = 1;
grid.y = (Yd.num_rows + TILE_SIZE - 1)/TILE_SIZE;
multiply_kernel_optimized<<< grid, threads >>>(Ad.elements, Xd.elements, Yd.elements, Ad.num_rows, Ad.num_columns);
cudaDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
check_CUDA_error("Error in kernel");
/* Copy result from the device */
copy_matrix_from_device(Y, Yd);
/* Free memory on device */
cudaFree(Ad.elements);
cudaFree(Xd.elements);
cudaFree(Yd.elements);
}
/* Allocate memory for matrix on device */
matrix_t allocate_matrix_on_device(matrix_t M)
{
matrix_t Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void **)&Mdevice.elements, size);
return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization with values between [-0.5, +0.5].
*/
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
if (M.elements == NULL)
return M;
int i;
for (i = 0; i < size; i++) {
if(init == 0)
M.elements[i] = 0.0;
else
M.elements[i] = rand()/(float)RAND_MAX - 0.5;
}
return M;
}
/* Copy matrix from host memory to device memory */
void copy_matrix_to_device(matrix_t Mdevice, matrix_t Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
/* Copy matrix from device memory to host memory */
void copy_matrix_from_device(matrix_t Mhost, matrix_t Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
/* Prints matrix out to screen */
void print_matrix(matrix_t M)
{
int i, j;
for (i = 0; i < M.num_rows; i++) {
for (j = 0; j < M.num_columns; j++) {
fprintf(stderr, "%f ", M.elements[i * M.num_columns + j]);
}
fprintf(stderr, "\n");
}
printf("\n");
}
void check_CUDA_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Compare reference and device results */
int check_results(float *val1, float *val2, int num_elements, float eps)
{
float max_re = 0.0;
float re = 0.0;
int i;
for (i = 0; i < num_elements; i++) {
re = fabsf((val1[i] - val2[i])/val1[i]);
if (re > max_re)
max_re = re;
}
fprintf(stderr, "Max relative error = %f\n", max_re);
if (max_re <= eps)
return 1;
else
return 0;
}
|
bced88811f89df72e3eddc84d701b49eba7d7c94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_t1 [8][2];
static int dims_update_halo_kernel1_t1_h [8][2] = {0};
//user function
__device__
inline void update_halo_kernel1_t1_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,-1,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,-1,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,-1,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,-1,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,-1,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,-1,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,-1,0);
}
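/* Editorial note: ACC<double> is assumed to be the OPS accessor template that
   wraps a raw field pointer together with the x/y block dimensions passed to
   its constructor, so that a relative index such as (0,-1,0) addresses the
   element one cell below the thread's own cell in the y direction. The t1
   halo update above therefore copies each selected field from the adjacent
   interior row into the boundary row being filled. */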
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[0][0] * dims_update_halo_kernel1_t1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[1][0] * dims_update_halo_kernel1_t1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[2][0] * dims_update_halo_kernel1_t1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[3][0] * dims_update_halo_kernel1_t1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[4][0] * dims_update_halo_kernel1_t1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[5][0] * dims_update_halo_kernel1_t1[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[6][0] * dims_update_halo_kernel1_t1[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_t1[0][0], dims_update_halo_kernel1_t1[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_t1[1][0], dims_update_halo_kernel1_t1[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_t1[2][0], dims_update_halo_kernel1_t1[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_t1[3][0], dims_update_halo_kernel1_t1[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_t1[4][0], dims_update_halo_kernel1_t1[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_t1[5][0], dims_update_halo_kernel1_t1[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_t1[6][0], dims_update_halo_kernel1_t1[6][1], arg6);
update_halo_kernel1_t1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,14)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(14,"update_halo_kernel1_t1");
OPS_kernels[14].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_t1_h[0][0] || ydim0 != dims_update_halo_kernel1_t1_h[0][1] || xdim1 != dims_update_halo_kernel1_t1_h[1][0] || ydim1 != dims_update_halo_kernel1_t1_h[1][1] || xdim2 != dims_update_halo_kernel1_t1_h[2][0] || ydim2 != dims_update_halo_kernel1_t1_h[2][1] || xdim3 != dims_update_halo_kernel1_t1_h[3][0] || ydim3 != dims_update_halo_kernel1_t1_h[3][1] || xdim4 != dims_update_halo_kernel1_t1_h[4][0] || ydim4 != dims_update_halo_kernel1_t1_h[4][1] || xdim5 != dims_update_halo_kernel1_t1_h[5][0] || ydim5 != dims_update_halo_kernel1_t1_h[5][1] || xdim6 != dims_update_halo_kernel1_t1_h[6][0] || ydim6 != dims_update_halo_kernel1_t1_h[6][1]) {
dims_update_halo_kernel1_t1_h[0][0] = xdim0;
dims_update_halo_kernel1_t1_h[0][1] = ydim0;
dims_update_halo_kernel1_t1_h[1][0] = xdim1;
dims_update_halo_kernel1_t1_h[1][1] = ydim1;
dims_update_halo_kernel1_t1_h[2][0] = xdim2;
dims_update_halo_kernel1_t1_h[2][1] = ydim2;
dims_update_halo_kernel1_t1_h[3][0] = xdim3;
dims_update_halo_kernel1_t1_h[3][1] = ydim3;
dims_update_halo_kernel1_t1_h[4][0] = xdim4;
dims_update_halo_kernel1_t1_h[4][1] = ydim4;
dims_update_halo_kernel1_t1_h[5][0] = xdim5;
dims_update_halo_kernel1_t1_h[5][1] = ydim5;
dims_update_halo_kernel1_t1_h[6][0] = xdim6;
dims_update_halo_kernel1_t1_h[6][1] = ydim6;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_t1, dims_update_halo_kernel1_t1_h, sizeof(dims_update_halo_kernel1_t1)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[14].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[14].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[14].mpi_time += t2-t1;
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 14;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 14;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(14,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
| bced88811f89df72e3eddc84d701b49eba7d7c94.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_t1 [8][2];
static int dims_update_halo_kernel1_t1_h [8][2] = {0};
//user function
__device__
inline void update_halo_kernel1_t1_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,-1,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,-1,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,-1,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,-1,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,-1,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,-1,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,-1,0);
}
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[0][0] * dims_update_halo_kernel1_t1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[1][0] * dims_update_halo_kernel1_t1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[2][0] * dims_update_halo_kernel1_t1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[3][0] * dims_update_halo_kernel1_t1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[4][0] * dims_update_halo_kernel1_t1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[5][0] * dims_update_halo_kernel1_t1[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_t1[6][0] * dims_update_halo_kernel1_t1[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_t1[0][0], dims_update_halo_kernel1_t1[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_t1[1][0], dims_update_halo_kernel1_t1[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_t1[2][0], dims_update_halo_kernel1_t1[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_t1[3][0], dims_update_halo_kernel1_t1[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_t1[4][0], dims_update_halo_kernel1_t1[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_t1[5][0], dims_update_halo_kernel1_t1[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_t1[6][0], dims_update_halo_kernel1_t1[6][1], arg6);
update_halo_kernel1_t1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,14)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(14,"update_halo_kernel1_t1");
OPS_kernels[14].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_t1_h[0][0] || ydim0 != dims_update_halo_kernel1_t1_h[0][1] || xdim1 != dims_update_halo_kernel1_t1_h[1][0] || ydim1 != dims_update_halo_kernel1_t1_h[1][1] || xdim2 != dims_update_halo_kernel1_t1_h[2][0] || ydim2 != dims_update_halo_kernel1_t1_h[2][1] || xdim3 != dims_update_halo_kernel1_t1_h[3][0] || ydim3 != dims_update_halo_kernel1_t1_h[3][1] || xdim4 != dims_update_halo_kernel1_t1_h[4][0] || ydim4 != dims_update_halo_kernel1_t1_h[4][1] || xdim5 != dims_update_halo_kernel1_t1_h[5][0] || ydim5 != dims_update_halo_kernel1_t1_h[5][1] || xdim6 != dims_update_halo_kernel1_t1_h[6][0] || ydim6 != dims_update_halo_kernel1_t1_h[6][1]) {
dims_update_halo_kernel1_t1_h[0][0] = xdim0;
dims_update_halo_kernel1_t1_h[0][1] = ydim0;
dims_update_halo_kernel1_t1_h[1][0] = xdim1;
dims_update_halo_kernel1_t1_h[1][1] = ydim1;
dims_update_halo_kernel1_t1_h[2][0] = xdim2;
dims_update_halo_kernel1_t1_h[2][1] = ydim2;
dims_update_halo_kernel1_t1_h[3][0] = xdim3;
dims_update_halo_kernel1_t1_h[3][1] = ydim3;
dims_update_halo_kernel1_t1_h[4][0] = xdim4;
dims_update_halo_kernel1_t1_h[4][1] = ydim4;
dims_update_halo_kernel1_t1_h[5][0] = xdim5;
dims_update_halo_kernel1_t1_h[5][1] = ydim5;
dims_update_halo_kernel1_t1_h[6][0] = xdim6;
dims_update_halo_kernel1_t1_h[6][1] = ydim6;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_t1, dims_update_halo_kernel1_t1_h, sizeof(dims_update_halo_kernel1_t1)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[14].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_t1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[14].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[14].mpi_time += t2-t1;
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 14;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 14;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(14,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
|
01ddc026c6a3a8ac7c9cb4dfaba6a173043d7448.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__constant__ float grid_size=1.0;
namespace{
/**
* perform max-pooling within the cells
* parallel over each cell and each feature dimension
*/
template <typename scalar_t>
__global__ void grid_pooling_kernel(
const scalar_t *point,
const scalar_t *feat_points,
scalar_t *feat_cell,
int *indices,
const int n ){
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// grid dimensions (number of cells along each axis)
// int W = gridDim.x;
int H = gridDim.y;
int D = gridDim.z;
int ind = i*H*D + j*D + k;
int c = threadIdx.x;
int C = blockDim.x;
for (int p=0; p<n; p++){
scalar_t px = point[p*3+0];
scalar_t py = point[p*3+1];
scalar_t pz = point[p*3+2];
// if point is inside of the grid
if (px >= i && px < i+grid_size && py >= j && py < j+grid_size && pz >= k && pz < k+grid_size){
// max-pooling, update feat_cell if the feature is larger than the current feat_cell
// can be async for max operation
if ( feat_points[p*C + c] > feat_cell[ind*C + c] ){
feat_cell[ind*C + c] = feat_points[p*C + c];
indices[ind*C + c] = p;
}
}
}
}
/**
* back-propagate the loss from the max-pooled feature to point features
* parallel over each cell and each feature dimension
*/
template <typename scalar_t>
__global__ void grad_grid_pooling_kernel(
const scalar_t *grad_output,
const int *indices,
scalar_t *grad_feat_points){
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// grid dimensions (number of cells along each axis)
// int W = gridDim.x;
int H = gridDim.y;
int D = gridDim.z;
int ind = i*H*D + j*D + k;
int c = threadIdx.x;
int C = blockDim.x;
long int p = indices[ind*C + c];
if (p < 0) return;
grad_feat_points[p*C + c] = grad_output[ind*C + c];
}
} //namespace
/*
* Forward function, project the point features to cells, perform max pooling in every cell
* params:
* point input, all points, Nx3
* feat_points input, feature of all points, NxC
* shape input, size of the grid [W, H, D], 3
* feat_cell output, feature of all cells, (WxHxD)xC
* indices output, indices of max pooling, saved for back propagation, (WxHxD)xC
*
*/
void grid_pooling_kernel_forward(
at::Tensor point,
at::Tensor feat_points,
at::Tensor shape,
at::Tensor feat_cell,
at::Tensor indices){
int W = shape.data<long>()[0];
int H = shape.data<long>()[1];
int D = shape.data<long>()[2];
int C = feat_cell.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(C, 1, 1);
// launch the kernel
int n = point.size(0);
hipLaunchKernelGGL(( grid_pooling_kernel<float>), dim3(dimGrid), dim3(dimBlock), 0, 0,
point.data<float>(),
feat_points.data<float>(),
feat_cell.data<float>(),
indices.data<int>(),
n);
}
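/* Example usage (illustrative only; the tensor names and factory calls here
   are hypothetical, not part of the original source). For N points with
   C-dimensional features on a W x H x D grid, the caller is expected to
   pre-allocate the outputs and pass the grid shape as a CPU long tensor:

     auto point       = torch::rand({N, 3}, torch::kCUDA);
     auto feat_points = torch::rand({N, C}, torch::kCUDA);
     auto shape       = torch::tensor({W, H, D}, torch::kLong);
     auto feat_cell   = torch::zeros({W * H * D, C}, torch::kCUDA);
     auto indices     = torch::full({W * H * D, C}, -1,
                                    torch::dtype(torch::kInt32).device(torch::kCUDA));
     grid_pooling_kernel_forward(point, feat_points, shape, feat_cell, indices);

   indices is initialised to -1 so that cells that received no point are
   skipped by the backward kernel, which returns early when p < 0. */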
/*
* Backward function, back-propagate the loss to the point features
* params:
 * grad_output input, gradient on the output feature, (WxHxD)xC
 * shape input, size of the grid [W, H, D], 3
 * indices input, indices of max pooling, (WxHxD)xC
* grad_feat_points output, gradient on the features, NxC
*
*/
void grid_pooling_kernel_backward(
at::Tensor grad_output,
at::Tensor shape,
at::Tensor indices,
at::Tensor grad_feat_points){
int W = shape.data<long>()[0];
int H = shape.data<long>()[1];
int D = shape.data<long>()[2];
int C = grad_output.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(C, 1, 1);
// launch the kernel
hipLaunchKernelGGL(( grad_grid_pooling_kernel<float>), dim3(dimGrid), dim3(dimBlock), 0, 0,
grad_output.data<float>(),
indices.data<int>(),
grad_feat_points.data<float>());
}
| 01ddc026c6a3a8ac7c9cb4dfaba6a173043d7448.cu | #include <ATen/ATen.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__constant__ float grid_size=1.0;
namespace{
/**
* perform max-pooling within the cells
* parallel over each cell and each feature dimension
*/
template <typename scalar_t>
__global__ void grid_pooling_kernel(
const scalar_t *point,
const scalar_t *feat_points,
scalar_t *feat_cell,
int *indices,
const int n ){
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// grid dimensions (number of cells along each axis)
// int W = gridDim.x;
int H = gridDim.y;
int D = gridDim.z;
int ind = i*H*D + j*D + k;
int c = threadIdx.x;
int C = blockDim.x;
for (int p=0; p<n; p++){
scalar_t px = point[p*3+0];
scalar_t py = point[p*3+1];
scalar_t pz = point[p*3+2];
// if point is inside of the grid
if (px >= i && px < i+grid_size && py >= j && py < j+grid_size && pz >= k && pz < k+grid_size){
// max-pooling, update feat_cell if the feature is larger than the current feat_cell
// can be async for max operation
if ( feat_points[p*C + c] > feat_cell[ind*C + c] ){
feat_cell[ind*C + c] = feat_points[p*C + c];
indices[ind*C + c] = p;
}
}
}
}
/**
* back-propagate the loss from the max-pooled feature to point features
* parallel over each cell and each feature dimension
*/
template <typename scalar_t>
__global__ void grad_grid_pooling_kernel(
const scalar_t *grad_output,
const int *indices,
scalar_t *grad_feat_points){
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// grid dimensions (number of cells along each axis)
// int W = gridDim.x;
int H = gridDim.y;
int D = gridDim.z;
int ind = i*H*D + j*D + k;
int c = threadIdx.x;
int C = blockDim.x;
long int p = indices[ind*C + c];
if (p < 0) return;
grad_feat_points[p*C + c] = grad_output[ind*C + c];
}
} //namespace
/*
* Forward function, project the point features to cells, perform max pooling in every cell
* params:
* point input, all points, Nx3
* feat_points input, feature of all points, NxC
* shape input, size of the grid [W, H, D], 3
* feat_cell output, feature of all cells, (WxHxD)xC
* indices output, indices of max pooling, saved for back propagation, (WxHxD)xC
*
*/
void grid_pooling_kernel_forward(
at::Tensor point,
at::Tensor feat_points,
at::Tensor shape,
at::Tensor feat_cell,
at::Tensor indices){
int W = shape.data<long>()[0];
int H = shape.data<long>()[1];
int D = shape.data<long>()[2];
int C = feat_cell.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(C, 1, 1);
// launch the kernel
int n = point.size(0);
grid_pooling_kernel<float><<< dimGrid, dimBlock>>>(
point.data<float>(),
feat_points.data<float>(),
feat_cell.data<float>(),
indices.data<int>(),
n);
}
/*
* Backward function, back-propagate the loss to the point features
* params:
* grad_output input, gradient on the output feature, WxHxC
* shape input, size of the grid [W, H, D], 3
* indices input, indices of max pooling, WxHxC
* grad_feat_points output, gradient on the features, NxC
*
*/
void grid_pooling_kernel_backward(
at::Tensor grad_output,
at::Tensor shape,
at::Tensor indices,
at::Tensor grad_feat_points){
int W = shape.data<long>()[0];
int H = shape.data<long>()[1];
int D = shape.data<long>()[2];
int C = grad_output.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(C, 1, 1);
// launch the kernel
grad_grid_pooling_kernel<float><<< dimGrid, dimBlock>>>(
grad_output.data<float>(),
indices.data<int>(),
grad_feat_points.data<float>());
}
|
fbf3cd6192462e7f35ae8ceec4a9af5670315974.hip | // !!! This is a file automatically generated by hipify!!!
#include "gcTerms.hpp"
#include <cmath>
// Parallel acceleration
float eval_aPar(CParticle& p, const C3<float> r, const float *r_GC, const float *bDotGradB, int nGC)
{
int status = 0;
float This_bDotGradB = kj_interp1D(r.c1, r_GC, bDotGradB, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_APAR >= 1
if (status > 0) {
cout << "ERROR 1 in eval_aPar" << endl;
exit(1);
}
#endif
float aPar = -p.u / p.m * This_bDotGradB;
#if DEBUG_EVAL_APAR >= 1
if (isnan(aPar) || isinf(aPar)) {
status = 1;
cout << "ERROR 2 in eval_aPar" << endl;
exit(1);
}
#endif
return aPar;
}
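/* Editorial note: with p.u read as the magnetic moment mu (consistent with
   eval_vPer below, where vPer = sqrt(2*mu*B/m)), the expression above is the
   magnetic mirror force per unit mass, a_par = -(mu/m) * b_hat . grad(B),
   evaluated from the precomputed bDotGradB profile along the guiding-center
   grid. */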
// Perpendicular velocity
float eval_vPer(CParticle& p, const C3<float> r, const float *r_b0, const C3<float> *b0_CYL, int n)
{
int status = 0;
C3<float> This_b0_CYL = kj_interp1D(r.c1, r_b0, b0_CYL, n, status);
p.status = max(p.status, status);
return std::sqrt(2.0 * p.u * mag(This_b0_CYL) / p.m);
}
// Guiding center velocity
C3<float> eval_vGC(CParticle& p, const C3<float> r, const float vPer, const float vPar,
const float *r_b0, const C3<float> *b0_CYL, int n,
const float *r_GC, const C3<float> *curv_CYL, const C3<float> *grad_CYL, int nGC)
{
int status = 0;
C3<float> This_b0_CYL = kj_interp1D(r.c1, r_b0, b0_CYL, n, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 1 in eval_vGC" << endl;
exit(1);
}
#endif
status = 0;
C3<float> This_curv_CYL = kj_interp1D(r.c1, r_GC, curv_CYL, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 2 in eval_vGC" << endl;
exit(1);
}
#endif
status = 0;
C3<float> This_grad_CYL = kj_interp1D(r.c1, r_GC, grad_CYL, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 3 in eval_vGC" << endl;
exit(1);
}
#endif
#if DEBUG_EVAL_VGC >= 1
cout << "r.c1: " << r.c1 << endl;
cout << "p.c1: " << p.c1 << endl;
cout << "vPar: " << vPar << endl;
cout << "vPer: " << vPer << endl;
cout << "b0_CYL: " << This_b0_CYL.c1 << " " << This_b0_CYL.c2 << " " << This_b0_CYL.c3 << endl;
cout << "curv_CYL: " << This_curv_CYL.c1 << " " << This_curv_CYL.c2 << " " << This_curv_CYL.c3 << endl;
cout << "grad_CYL: " << This_grad_CYL.c1 << " " << This_grad_CYL.c2 << " " << This_grad_CYL.c3 << endl
<< endl;
cout << "max(grad_CYL): " << maxC3VecAbs(grad_CYL) << endl;
#endif
C3<float> UnitB_CYL = This_b0_CYL / mag(This_b0_CYL);
C3<float> vGC = vPar * UnitB_CYL + ::pow(vPer, 2) * This_grad_CYL + ::pow(vPar, 2) * This_curv_CYL;
return vGC;
}
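/* Editorial note: the drift velocity assembled above is
     v_GC = vPar * b_hat + vPer^2 * g + vPar^2 * c,
   where b_hat is the unit magnetic field vector and g, c are the grad-B and
   curvature coefficient vectors interpolated from the r_GC grid. Since no
   charge or mass appears in this expression, g and c are assumed to already
   carry the m/(q*B)-type prefactors of the standard drift formulas. */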
float GetAlpComp(const float vPer, const float phs)
{
return vPer * sin(phs);
}
float GetBetComp(const float vPer, const float phs)
{
return vPer * cos(phs);
}
| fbf3cd6192462e7f35ae8ceec4a9af5670315974.cu | #include "gcTerms.hpp"
#include <cmath>
// Parallel acceleration
float eval_aPar(CParticle& p, const C3<float> r, const float *r_GC, const float *bDotGradB, int nGC)
{
int status = 0;
float This_bDotGradB = kj_interp1D(r.c1, r_GC, bDotGradB, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_APAR >= 1
if (status > 0) {
cout << "ERROR 1 in eval_aPar" << endl;
exit(1);
}
#endif
float aPar = -p.u / p.m * This_bDotGradB;
#if DEBUG_EVAL_APAR >= 1
if (isnan(aPar) || isinf(aPar)) {
status = 1;
cout << "ERROR 2 in eval_aPar" << endl;
exit(1);
}
#endif
return aPar;
}
// Perpendicular velocity
float eval_vPer(CParticle& p, const C3<float> r, const float *r_b0, const C3<float> *b0_CYL, int n)
{
int status = 0;
C3<float> This_b0_CYL = kj_interp1D(r.c1, r_b0, b0_CYL, n, status);
p.status = max(p.status, status);
return std::sqrt(2.0 * p.u * mag(This_b0_CYL) / p.m);
}
// Guiding center velocity
C3<float> eval_vGC(CParticle& p, const C3<float> r, const float vPer, const float vPar,
const float *r_b0, const C3<float> *b0_CYL, int n,
const float *r_GC, const C3<float> *curv_CYL, const C3<float> *grad_CYL, int nGC)
{
int status = 0;
C3<float> This_b0_CYL = kj_interp1D(r.c1, r_b0, b0_CYL, n, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 1 in eval_vGC" << endl;
exit(1);
}
#endif
status = 0;
C3<float> This_curv_CYL = kj_interp1D(r.c1, r_GC, curv_CYL, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 2 in eval_vGC" << endl;
exit(1);
}
#endif
status = 0;
C3<float> This_grad_CYL = kj_interp1D(r.c1, r_GC, grad_CYL, nGC, status);
p.status = max(p.status, status);
#if DEBUG_EVAL_VGC >= 1
if (status > 0) {
cout << "ERROR 3 in eval_vGC" << endl;
exit(1);
}
#endif
#if DEBUG_EVAL_VGC >= 1
cout << "r.c1: " << r.c1 << endl;
cout << "p.c1: " << p.c1 << endl;
cout << "vPar: " << vPar << endl;
cout << "vPer: " << vPer << endl;
cout << "b0_CYL: " << This_b0_CYL.c1 << " " << This_b0_CYL.c2 << " " << This_b0_CYL.c3 << endl;
cout << "curv_CYL: " << This_curv_CYL.c1 << " " << This_curv_CYL.c2 << " " << This_curv_CYL.c3 << endl;
cout << "grad_CYL: " << This_grad_CYL.c1 << " " << This_grad_CYL.c2 << " " << This_grad_CYL.c3 << endl
<< endl;
cout << "max(grad_CYL): " << maxC3VecAbs(grad_CYL) << endl;
#endif
C3<float> UnitB_CYL = This_b0_CYL / mag(This_b0_CYL);
C3<float> vGC = vPar * UnitB_CYL + std::pow(vPer, 2) * This_grad_CYL + std::pow(vPar, 2) * This_curv_CYL;
return vGC;
}
float GetAlpComp(const float vPer, const float phs)
{
return vPer * sin(phs);
}
float GetBetComp(const float vPer, const float phs)
{
return vPer * cos(phs);
}
|
e6dccfa34314a35b30b839ff12f6307eaf012cc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/core.h"
#include "../include/graph.h"
#include "../include/SSSPutils.h"
#include "../include/algoCPU.h"
#include <iostream>
#include <chrono>
#include <vector>
#include <utility>
#define NUM_THREADS 256
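/* Editorial note: SSSP_kernel1 and SSSP_kernel2 are declared in
   include/SSSPutils.h and are not reproduced in this listing. Given how they
   are launched below, they are assumed to follow the classic two-phase GPU
   SSSP scheme over a CSR graph (V = row offsets, E = adjacency, W = weights):
   kernel1 relaxes the outgoing edges of every masked vertex into the
   updating-cost array U, and kernel2 commits improved U values back into C,
   re-marks those vertices, and raises the termination flag. A rough sketch:

   __global__ void SSSP_kernel1(int *V, int *E, int *W, bool *M,
                                int *C, int *U, int n) {
       int u = blockIdx.x * blockDim.x + threadIdx.x;
       if (u < n && M[u]) {
           M[u] = false;
           for (int e = V[u]; e < V[u + 1]; e++)     // edges leaving u
               atomicMin(&U[E[e]], C[u] + W[e]);     // relax edge u -> E[e]
       }
   }

   __global__ void SSSP_kernel2(bool *M, int *C, int *U, bool *flag, int n) {
       int u = blockIdx.x * blockDim.x + threadIdx.x;
       if (u < n) {
           if (C[u] > U[u]) { C[u] = U[u]; M[u] = true; *flag = true; }
           U[u] = C[u];
       }
   }
*/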
int main(int argc, char* argv[]) {
int s;
Graph G(argc, argv);
std::cout << "Source Vertex: ";
std::cin >> s;
// ========================= CUDA ============================= //
hipEventCreate(&start);
hipEventCreate(&stop);
// Declare and Initialise Host Array
int *V, *E, *W, *C, *U, Vs, Es;
bool *M, flag;
Vs = vecToArr(G.posV, &V);
Es = vecToArr(G.packE, &E);
vecToArr(G.packW, &W);
C = new int[Vs];
U = new int[Vs];
M = new bool[Vs];
std::fill_n(C, Vs, INF);
std::fill_n(U, Vs, INF);
std::fill_n(M, Vs, false);
C[s] = U[s] = 0; // Update source values
M[s] = flag = true;
// Declare and Initialise Device Array
int *devV, *devE, *devW, *devC, *devU;
bool *devM, *devFlag;
allocCopy<int>(&devV, V, Vs, "V_a");
allocCopy<int>(&devE, E, Es, "E_a");
allocCopy<int>(&devW, W, Es, "W_a");
allocCopy<int>(&devC, C, Vs, "C_a");
allocCopy<int>(&devU, U, Vs, "U_a");
allocCopy<bool>(&devM, M, Vs, "M_a");
allocCopy<bool>(&devFlag, &flag, 1, "flag");
// Run Cuda Parallel
int blocks = (Vs + NUM_THREADS - 1) / NUM_THREADS;
hipEventRecord(start);
while (flag) {
flag = false;
hipMemcpy(devFlag, &flag, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( SSSP_kernel1), dim3(blocks), dim3(NUM_THREADS) , 0, 0, devV, devE, devW, devM, devC, devU, Vs);
hipLaunchKernelGGL(( SSSP_kernel2), dim3(blocks), dim3(NUM_THREADS) , 0, 0, devM, devC, devU, devFlag, Vs);
hipMemcpy(&flag, devFlag, sizeof(bool), hipMemcpyDeviceToHost);
}
hipEventRecord(stop);
cudaCheck(hipPeekAtLastError());
if (cudaCheck(hipMemcpy(C, devC, Vs * sizeof(int), hipMemcpyDeviceToHost))) {
std::cout << "Obtained distance in host at C_a" << std::endl;
}
// Free memory
clear<int>(devV, "devV");
clear<int>(devE, "devE");
clear<int>(devW, "devW");
clear<int>(devC, "devC");
clear<int>(devU, "devU");
clear<bool>(devM, "devM");
clear<bool>(devFlag, "devFlag");
// Calculate Time Taken
hipEventSynchronize(stop);
float timeGPU = 0;
hipEventElapsedTime(&timeGPU, start, stop);
std::cout << "CUDA Elapsed Time (in ms): " << timeGPU << std::endl;
// ========================= CPU ============================= //
int *dis = new int[Vs];
std::fill_n(dis, Vs, INF);
auto beg = std::chrono::high_resolution_clock::now();
djikstraCPU(G, s, dis);
auto end = std::chrono::high_resolution_clock::now();
float timeCPU = std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count();
std::cout << "CPU Elapsed Time (in ms): " << timeCPU / 1000 << std::endl;
// ======================= Verification ==========================//
for (int i = 0; i < Vs; i++) {
if (dis[i] != C[i]) {
std::cout << "Not a Match at " << i << std::endl;
std::cout << "GPU dist: " << C[i] << std::endl;
std::cout << "CPU dist: " << dis[i] << std::endl;
exit(EXIT_FAILURE);
}
}
} | e6dccfa34314a35b30b839ff12f6307eaf012cc3.cu | #include "../include/core.h"
#include "../include/graph.h"
#include "../include/SSSPutils.h"
#include "../include/algoCPU.h"
#include <iostream>
#include <chrono>
#include <vector>
#include <utility>
#define NUM_THREADS 256
int main(int argc, char* argv[]) {
int s;
Graph G(argc, argv);
std::cout << "Source Vertex: ";
std::cin >> s;
// ========================= CUDA ============================= //
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Declare and Initialise Host Array
int *V, *E, *W, *C, *U, Vs, Es;
bool *M, flag;
Vs = vecToArr(G.posV, &V);
Es = vecToArr(G.packE, &E);
vecToArr(G.packW, &W);
C = new int[Vs];
U = new int[Vs];
M = new bool[Vs];
std::fill_n(C, Vs, INF);
std::fill_n(U, Vs, INF);
std::fill_n(M, Vs, false);
C[s] = U[s] = 0; // Update source values
M[s] = flag = true;
// Declare and Initialise Device Array
int *devV, *devE, *devW, *devC, *devU;
bool *devM, *devFlag;
allocCopy<int>(&devV, V, Vs, "V_a");
allocCopy<int>(&devE, E, Es, "E_a");
allocCopy<int>(&devW, W, Es, "W_a");
allocCopy<int>(&devC, C, Vs, "C_a");
allocCopy<int>(&devU, U, Vs, "U_a");
allocCopy<bool>(&devM, M, Vs, "M_a");
allocCopy<bool>(&devFlag, &flag, 1, "flag");
// Run Cuda Parallel
int blocks = (Vs + NUM_THREADS - 1) / NUM_THREADS;
cudaEventRecord(start);
while (flag) {
flag = false;
cudaMemcpy(devFlag, &flag, sizeof(bool), cudaMemcpyHostToDevice);
SSSP_kernel1<<< blocks, NUM_THREADS >>>(devV, devE, devW, devM, devC, devU, Vs);
SSSP_kernel2<<< blocks, NUM_THREADS >>>(devM, devC, devU, devFlag, Vs);
cudaMemcpy(&flag, devFlag, sizeof(bool), cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop);
cudaCheck(cudaPeekAtLastError());
if (cudaCheck(cudaMemcpy(C, devC, Vs * sizeof(int), cudaMemcpyDeviceToHost))) {
std::cout << "Obtained distance in host at C_a" << std::endl;
}
// Free memory
clear<int>(devV, "devV");
clear<int>(devE, "devE");
clear<int>(devW, "devW");
clear<int>(devC, "devC");
clear<int>(devU, "devU");
clear<bool>(devM, "devM");
clear<bool>(devFlag, "devFlag");
// Calculate Time Taken
cudaEventSynchronize(stop);
float timeGPU = 0;
cudaEventElapsedTime(&timeGPU, start, stop);
std::cout << "CUDA Elapsed Time (in ms): " << timeGPU << std::endl;
// ========================= CPU ============================= //
int *dis = new int[Vs];
std::fill_n(dis, Vs, INF);
auto beg = std::chrono::high_resolution_clock::now();
djikstraCPU(G, s, dis);
auto end = std::chrono::high_resolution_clock::now();
float timeCPU = std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count();
std::cout << "CPU Elapsed Time (in ms): " << timeCPU / 1000 << std::endl;
// ======================= Verification ==========================//
for (int i = 0; i < Vs; i++) {
if (dis[i] != C[i]) {
std::cout << "Not a Match at " << i << std::endl;
std::cout << "GPU dist: " << C[i] << std::endl;
std::cout << "CPU dist: " << dis[i] << std::endl;
exit(EXIT_FAILURE);
}
}
} |
e98409171acf2a02810b18a974535ad9ffb50661.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
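// Why the arguments are swapped in the call above: cuBLAS/hipBLAS assumes
// column-major (Fortran) storage, while these wrappers receive row-major
// data. A row-major M x N matrix occupies the same memory as its N x M
// column-major transpose, so the row-major product C = op(A) * op(B) is
// obtained by computing the column-major product C^T = op(B)^T * op(A)^T.
// That is why B is passed before A, the M and N arguments are exchanged,
// and lda/ldb are the row-major row widths of A and B respectively.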
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const float alpha, const float* A,
const int lda, const float* B,
const int ldb, const float beta, float* C,
const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const double alpha, const double* A,
const int lda, const double* B,
const int ldb, const double beta,
double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
template <>
void batched_gemm<platform::GPUPlace, float>(
const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::hipblasSgemmStridedBatched(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA,
&beta, C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::GPUPlace, double>(
const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::hipblasDgemmStridedBatched(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA,
&beta, C, ldc, strideC, batchCount));
}
template <>
void gemv<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool trans_a, const int M,
const int N, const float alpha,
const float* A, const float* B,
const float beta, float* C) {
hipblasOperation_t cuTransA = (trans_a == false) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::hipblasSgemv(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1));
}
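// Note on the inverted transpose flag in gemv above: the M x N matrix A is
// stored row-major, which the BLAS sees as the column-major N x M matrix A^T.
// Requesting the transposed product of that view (OP_T when trans_a == false)
// therefore multiplies by A itself, which is also why the dimensions are
// passed as (N, M) and the leading dimension is N.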
template <>
void gemv<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool trans_a, const int M,
const int N, const double alpha,
const double* A, const double* B,
const double beta, double* C) {
hipblasOperation_t cuTransA = (trans_a == false) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::hipblasDgemv(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1));
}
template struct SetConstant<platform::GPUPlace, float>;
} // namespace math
} // namespace operators
} // namespace paddle
| e98409171acf2a02810b18a974535ad9ffb50661.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const float alpha, const float* A,
const int lda, const float* B,
const int ldb, const float beta, float* C,
const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const double alpha, const double* A,
const int lda, const double* B,
const int ldb, const double beta,
double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
template <>
void batched_gemm<platform::GPUPlace, float>(
const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA,
&beta, C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::GPUPlace, double>(
const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA,
&beta, C, ldc, strideC, batchCount));
}
template <>
void gemv<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool trans_a, const int M,
const int N, const float alpha,
const float* A, const float* B,
const float beta, float* C) {
cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::cublasSgemv(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1));
}
template <>
void gemv<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool trans_a, const int M,
const int N, const double alpha,
const double* A, const double* B,
const double beta, double* C) {
cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::cublasDgemv(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1));
}
template struct SetConstant<platform::GPUPlace, float>;
} // namespace math
} // namespace operators
} // namespace paddle
|
a4a764a3fc4aedbbf25a5deb1ec27c9945156a5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <memory.h>
#include <string.h>
#include <map>
#ifndef _WIN32
#include <unistd.h>
#endif
// include thrust
#ifndef __cplusplus
#include <thrust/version.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#else
#include <ctype.h>
#endif
#include "../../miner.h"
#include "hip/hip_runtime.h"
hipStream_t gpustream[MAX_GPUS] = { 0 };
int opt_api_listen;
// CUDA Devices on the System
extern "C" int cuda_num_devices()
{
int version;
hipError_t err = hipDriverGetVersion(&version);
if (err != hipSuccess)
{
exit(1);
}
int maj = version / 1000, min = version % 100; // same as in deviceQuery sample
if (maj < 5 || (maj == 5 && min < 5))
{
exit(1);
}
int GPU_N;
err = hipGetDeviceCount(&GPU_N);
if (err != hipSuccess)
{
exit(1);
}
return GPU_N;
}
extern "C" void cuda_devicenames()
{
hipError_t err;
int GPU_N;
err = hipGetDeviceCount(&GPU_N);
if (err != hipSuccess)
{
exit(1);
}
for (int i = 0; i < GPU_N*opt_n_gputhreads; i++)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, device_map[i / opt_n_gputhreads]);
device_name[i] = strdup(props.name);
device_sm[i] = (props.major * 100 + props.minor * 10);
}
}
// Can't be called directly in cpu-miner.c
extern "C" void cuda_devicereset()
{
hipDeviceSynchronize();
hipDeviceReset();
}
extern "C" void cuda_print_devices()
{
int ngpus = cuda_num_devices();
cuda_devicenames();
for (int n = 0; n < ngpus; n++) {
int m = device_map[n % MAX_GPUS];
hipDeviceProp_t props;
hipGetDeviceProperties(&props, m);
if (!opt_n_threads || n < opt_n_threads) {
fprintf(stderr, "GPU #%d: SM %d.%d %s\n", m, props.major, props.minor, device_name[n]);
}
}
}
extern "C" static bool substringsearch(const char *haystack, const char *needle, int &match)
{
int hlen = (int) strlen(haystack);
int nlen = (int) strlen(needle);
for (int i=0; i < hlen; ++i)
{
if (haystack[i] == ' ') continue;
int j=0, x = 0;
while(j < nlen)
{
if (haystack[i+x] == ' ') {++x; continue;}
if (needle[j] == ' ') {++j; continue;}
if (needle[j] == '#') return ++match == needle[j+1]-'0';
if (tolower(haystack[i+x]) != tolower(needle[j])) break;
++j; ++x;
}
if (j == nlen) return true;
}
return false;
}
// Find a CUDA device by name (returns the device index, or -1 if not found)
extern "C" int cuda_finddevice(char *name)
{
int num = cuda_num_devices();
int match = 0;
for (int i=0; i < num; ++i)
{
hipDeviceProp_t props;
if (hipGetDeviceProperties(&props, i) == hipSuccess)
if (substringsearch(props.name, name, match)) return i;
}
return -1;
}
extern "C" uint32_t device_intensity(int thr_id, const char *func, uint32_t defcount)
{
uint32_t throughput = gpus_intensity[thr_id] ? gpus_intensity[thr_id] : defcount;
if(opt_api_listen!=0) api_set_throughput(thr_id, throughput);
return throughput;
}
// Time-synchronization routine from cudaminer, with a CPU sleep to reduce busy-waiting
typedef struct { double value[8]; } tsumarray;
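// Summary of the adaptive wait below: for every (situation, thread) pair the
// routine keeps an exponentially weighted estimate of how long the stream
// usually takes to drain. If the stream is still busy it first sleeps for
// ~95% of that estimate, then measures the remaining blocking synchronize
// time and folds (tsleep + tsync) back into the estimate, trading a little
// latency for far less CPU spinning.
// Hypothetical call site, for illustration only: after launching work on
// gpustream[thr_id], something like
//   MyStreamSynchronize(gpustream[thr_id], 0, thr_id);
// waits for completion without pegging one CPU core per GPU thread.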
extern "C" hipError_t MyStreamSynchronize(hipStream_t stream, int situation, int thr_id)
{
hipError_t result = hipSuccess;
if (situation >= 0)
{
static std::map<int, tsumarray> tsum;
double tsync = 0.0;
double tsleep = 0.95;
double a = 0.95, b = 0.05;
if (tsum.find(situation) == tsum.end()) { a = 0.5; b = 0.5; } // faster initial convergence
tsleep = 0.95*tsum[situation].value[thr_id];
if (hipStreamQuery(stream) == hipErrorNotReady)
{
usleep((useconds_t)(1e6*tsleep));
struct timeval tv_start, tv_end;
gettimeofday(&tv_start, NULL);
result = hipStreamSynchronize(stream);
gettimeofday(&tv_end, NULL);
tsync = 1e-6 * (tv_end.tv_usec - tv_start.tv_usec) + (tv_end.tv_sec - tv_start.tv_sec);
}
if (tsync >= 0) tsum[situation].value[thr_id] = a * tsum[situation].value[thr_id] + b * (tsleep + tsync);
}
else
result = hipStreamSynchronize(stream);
return result;
}
extern "C" int cuda_gpu_clocks(struct cgpu_info *gpu)
{
hipDeviceProp_t props;
if (hipGetDeviceProperties(&props, gpu->gpu_id) == hipSuccess) {
gpu->gpu_clock = props.clockRate;
gpu->gpu_memclock = props.memoryClockRate;
gpu->gpu_mem = props.totalGlobalMem;
return 0;
}
return -1;
}
extern "C" void cudaReportHardwareFailure(int thr_id, hipError_t err, const char* func)
{
struct cgpu_info *gpu = &thr_info[thr_id].gpu;
gpu->hw_errors++;
sleep(1);
}
| a4a764a3fc4aedbbf25a5deb1ec27c9945156a5b.cu | #include <stdio.h>
#include <memory.h>
#include <string.h>
#include <map>
#ifndef _WIN32
#include <unistd.h>
#endif
// include thrust
#ifndef __cplusplus
#include <thrust/version.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#else
#include <ctype.h>
#endif
#include "../../miner.h"
#include "cuda_runtime.h"
cudaStream_t gpustream[MAX_GPUS] = { 0 };
int opt_api_listen;
// CUDA Devices on the System
extern "C" int cuda_num_devices()
{
int version;
cudaError_t err = cudaDriverGetVersion(&version);
if (err != cudaSuccess)
{
exit(1);
}
int maj = version / 1000, min = version % 100; // same as in deviceQuery sample
if (maj < 5 || (maj == 5 && min < 5))
{
exit(1);
}
int GPU_N;
err = cudaGetDeviceCount(&GPU_N);
if (err != cudaSuccess)
{
exit(1);
}
return GPU_N;
}
extern "C" void cuda_devicenames()
{
cudaError_t err;
int GPU_N;
err = cudaGetDeviceCount(&GPU_N);
if (err != cudaSuccess)
{
exit(1);
}
for (int i = 0; i < GPU_N*opt_n_gputhreads; i++)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, device_map[i / opt_n_gputhreads]);
device_name[i] = strdup(props.name);
device_sm[i] = (props.major * 100 + props.minor * 10);
}
}
// Can't be called directly in cpu-miner.c
extern "C" void cuda_devicereset()
{
cudaDeviceSynchronize();
cudaDeviceReset();
}
extern "C" void cuda_print_devices()
{
int ngpus = cuda_num_devices();
cuda_devicenames();
for (int n = 0; n < ngpus; n++) {
int m = device_map[n % MAX_GPUS];
cudaDeviceProp props;
cudaGetDeviceProperties(&props, m);
if (!opt_n_threads || n < opt_n_threads) {
fprintf(stderr, "GPU #%d: SM %d.%d %s\n", m, props.major, props.minor, device_name[n]);
}
}
}
extern "C" static bool substringsearch(const char *haystack, const char *needle, int &match)
{
int hlen = (int) strlen(haystack);
int nlen = (int) strlen(needle);
for (int i=0; i < hlen; ++i)
{
if (haystack[i] == ' ') continue;
int j=0, x = 0;
while(j < nlen)
{
if (haystack[i+x] == ' ') {++x; continue;}
if (needle[j] == ' ') {++j; continue;}
if (needle[j] == '#') return ++match == needle[j+1]-'0';
if (tolower(haystack[i+x]) != tolower(needle[j])) break;
++j; ++x;
}
if (j == nlen) return true;
}
return false;
}
// Find a CUDA device by name (returns the device index, or -1 if not found)
extern "C" int cuda_finddevice(char *name)
{
int num = cuda_num_devices();
int match = 0;
for (int i=0; i < num; ++i)
{
cudaDeviceProp props;
if (cudaGetDeviceProperties(&props, i) == cudaSuccess)
if (substringsearch(props.name, name, match)) return i;
}
return -1;
}
extern "C" uint32_t device_intensity(int thr_id, const char *func, uint32_t defcount)
{
uint32_t throughput = gpus_intensity[thr_id] ? gpus_intensity[thr_id] : defcount;
if(opt_api_listen!=0) api_set_throughput(thr_id, throughput);
return throughput;
}
// Time-synchronization routine from cudaminer, with a CPU sleep to reduce busy-waiting
typedef struct { double value[8]; } tsumarray;
extern "C" cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id)
{
cudaError_t result = cudaSuccess;
if (situation >= 0)
{
static std::map<int, tsumarray> tsum;
double tsync = 0.0;
double tsleep = 0.95;
double a = 0.95, b = 0.05;
if (tsum.find(situation) == tsum.end()) { a = 0.5; b = 0.5; } // faster initial convergence
tsleep = 0.95*tsum[situation].value[thr_id];
if (cudaStreamQuery(stream) == cudaErrorNotReady)
{
usleep((useconds_t)(1e6*tsleep));
struct timeval tv_start, tv_end;
gettimeofday(&tv_start, NULL);
result = cudaStreamSynchronize(stream);
gettimeofday(&tv_end, NULL);
tsync = 1e-6 * (tv_end.tv_usec - tv_start.tv_usec) + (tv_end.tv_sec - tv_start.tv_sec);
}
if (tsync >= 0) tsum[situation].value[thr_id] = a * tsum[situation].value[thr_id] + b * (tsleep + tsync);
}
else
result = cudaStreamSynchronize(stream);
return result;
}
extern "C" int cuda_gpu_clocks(struct cgpu_info *gpu)
{
cudaDeviceProp props;
if (cudaGetDeviceProperties(&props, gpu->gpu_id) == cudaSuccess) {
gpu->gpu_clock = props.clockRate;
gpu->gpu_memclock = props.memoryClockRate;
gpu->gpu_mem = props.totalGlobalMem;
return 0;
}
return -1;
}
extern "C" void cudaReportHardwareFailure(int thr_id, cudaError_t err, const char* func)
{
struct cgpu_info *gpu = &thr_info[thr_id].gpu;
gpu->hw_errors++;
sleep(1);
}
|
ef706323ce75c826496bb8b7f086226121931f41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
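/*
 * Implementation note: although the kernel still receives A, B, alpha and
 * beta, none of them are read -- each thread simply zeroes its row of a
 * 64 x 16 tile of C. The variable lda is reused below as the count of
 * in-range columns of that tile (16 in the interior, fewer in the last
 * block along n, and 0 for threads whose row index reaches past m), and
 * the switch just unrolls that many column writes.
 */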
__global__ void
dgemm_kernel_ab_0(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_ab_0(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( dgemm_kernel_ab_0), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| ef706323ce75c826496bb8b7f086226121931f41.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
__global__ void
dgemm_kernel_ab_0(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_ab_0(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
dgemm_kernel_ab_0<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
da4950eedcbbb17d6e84d0f7c9a1c2efcee95831.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath> // Without this, abs returns zero!
#include <random>
/*
#include "mddft1.fftx.codegen.hpp"
#include "imddft1.fftx.codegen.hpp"
#include "mddft2.fftx.codegen.hpp"
#include "imddft2.fftx.codegen.hpp"
*/
#include "mddft3.fftx.codegen.hpp"
#include "imddft3.fftx.codegen.hpp"
/*
#include "prdft1.fftx.codegen.hpp"
#include "iprdft1.fftx.codegen.hpp"
#include "prdft2.fftx.codegen.hpp"
#include "iprdft2.fftx.codegen.hpp"
*/
#include "prdft3.fftx.codegen.hpp"
#include "iprdft3.fftx.codegen.hpp"
#include "fftx3utilities.h"
#include "verify.h"
enum VerbosityLevel { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
// using namespace fftx;
std::mt19937 generator;
// unifRealDist is uniform over the reals in (-1/2, 1/2).
std::uniform_real_distribution<double> unifRealDist;
// unifInt[d] is uniform over the integers in domain.lo[d] : domain.hi[d]
std::uniform_int_distribution<int> unifInt[3];
// Return random point in domain.
template<int DIM>
fftx::point_t<DIM> unifPoint()
{
fftx::point_t<DIM> ret;
for (int d = 0; d < DIM; d++)
{
ret[d] = unifInt[d](generator);
}
return ret;
}
// Return random real number.
double unifReal()
{
return unifRealDist(generator);
}
// Return random complex number.
std::complex<double> unifComplex()
{
return std::complex<double>(unifReal(), unifReal());
}
inline void getUnifScalar(double& a_scalar)
{
a_scalar = unifReal();
}
inline void getUnifScalar(std::complex<double>& a_scalar)
{
a_scalar = unifComplex();
}
template<typename T>
inline T unifScalar()
{
T ret;
getUnifScalar(ret);
return ret;
}
template<typename T_IN, typename T_OUT>
void getUnifScalarPair(T_IN& a_scalarIn,
T_OUT& a_scalarOut);
void getUnifScalarPair(std::complex<double>& a_scalarIn,
std::complex<double>& a_scalarOut)
{
a_scalarIn = unifComplex();
a_scalarOut = a_scalarIn;
}
void getUnifScalarPair(double& a_scalarIn,
std::complex<double>& a_scalarOut)
{
a_scalarIn = unifReal();
a_scalarOut = std::complex<double>(a_scalarIn, 0.);
}
void getUnifScalarPair(std::complex<double>& a_scalarIn,
double& a_scalarOut)
{
a_scalarOut = unifReal();
a_scalarIn = std::complex<double>(a_scalarOut, 0.);
}
// Fill a_arr with real numbers distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifRealArray(fftx::array_t<DIM, double>& a_arr)
{
forall([](double(&v),
const fftx::point_t<DIM>& p)
{
v = unifReal();
}, a_arr);
}
// Fill a_arr with complex numbers with real and imaginary components distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifComplexArray(fftx::array_t<DIM, std::complex<double>>& a_arr)
{
forall([](std::complex<double>(&v),
const fftx::point_t<DIM>& p)
{
v = unifComplex();
}, a_arr);
}
template<int DIM, typename T>
void unifArray(fftx::array_t<DIM, T>& a_arr);
template<int DIM>
void unifArray(fftx::array_t<DIM, double>& a_arr)
{
unifRealArray(a_arr);
}
template<int DIM>
void unifArray(fftx::array_t<DIM, std::complex<double>>& a_arr)
{
unifComplexArray(a_arr);
}
// Set a_arr to a_scaling at point a_fixed, and 0 elsewhere.
template<int DIM, typename T>
void setUnitImpulse(fftx::array_t<DIM, T>& a_arr,
const fftx::point_t<DIM>& a_fixed,
T a_scaling = scalarVal<T>(1.) )
{
forall([a_fixed, a_scaling](T(&v),
const fftx::point_t<DIM>& p)
{
if (p == a_fixed)
{
v = a_scaling;
}
else
{
v = scalarVal<T>(0.);
}
}, a_arr);
}
// Set a_arr to product of waves from impulse at a_fixed.
template<int DIM>
void setProductWaves(fftx::array_t<DIM, std::complex<double>>& a_arr,
const fftx::point_t<DIM>& a_extent,
const fftx::point_t<DIM>& a_fixed,
int a_sign)
{
fftx::point_t<DIM> lo = a_arr.m_domain.lo;
std::complex<double> omega[DIM];
for (int d = 0; d < DIM; d++)
{
double th = (a_sign*2*(a_fixed[d] - lo[d])) * M_PI / (a_extent[d] * 1.);
omega[d] = std::complex<double>(cos(th), sin(th));
}
forall([omega, lo](std::complex<double>(&v),
const fftx::point_t<DIM>& p)
{
v = std::complex<double>(1., 0.);
for (int d = 0; d < DIM; d++)
{
v *= pow(omega[d], p[d] - lo[d]);
}
}, a_arr);
}
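// In other words, setProductWaves fills a_arr with
// a_arr[p] = prod_d exp( a_sign * 2*pi*i * (a_fixed[d]-lo[d]) * (p[d]-lo[d]) / a_extent[d] ),
// i.e. the unnormalized DFT (forward for a_sign = -1, inverse for a_sign = +1)
// of a unit impulse located at a_fixed; the impulse tests below compare
// transform output against exactly these product waves.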
template<int DIM>
void setRotator(fftx::array_t<DIM, std::complex<double>>& a_arr,
const fftx::box_t<DIM>& a_dom,
int a_dim,
int a_shift)
{
fftx::point_t<DIM> lo = a_dom.lo;
fftx::point_t<DIM> hi = a_dom.hi;
fftx::point_t<DIM> fixed = lo;
if (a_shift > 0)
{
fixed[a_dim] = lo[a_dim] + a_shift;
}
else if (a_shift < 0)
{
fixed[a_dim] = hi[a_dim] - (a_shift+1);
}
// std::cout << "setRotator in " << a_dim << " shift " << a_shift
// << " waves " << fixed << " of " << a_arr.m_domain << std::endl;
setProductWaves(a_arr, a_dom.extents(), fixed, -1);
}
template<int DIM, typename T_IN, typename T_OUT>
void DFTfunctionDevice(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::array_t<DIM, T_IN>& a_input, // make this const?
fftx::array_t<DIM, T_OUT>& a_output)
{
auto inputDomain = a_input.m_domain;
auto outputDomain = a_output.m_domain;
auto input_size = inputDomain.size();
auto output_size = outputDomain.size();
auto input_bytes = input_size * sizeof(T_IN);
auto output_bytes = output_size * sizeof(T_OUT);
char* bufferPtr;
hipMalloc(&bufferPtr, input_bytes + output_bytes);
T_IN* inputPtr = (T_IN*) bufferPtr;
bufferPtr += input_bytes;
T_OUT* outputPtr = (T_OUT*) bufferPtr;
hipMemcpy(inputPtr, a_input.m_data.local(), input_bytes,
hipMemcpyHostToDevice);
fftx::array_t<DIM, T_IN> inputDevice(fftx::global_ptr<T_IN>
(inputPtr, 0, 1), inputDomain);
fftx::array_t<DIM, T_OUT> outputDevice(fftx::global_ptr<T_OUT>
(outputPtr, 0, 1), outputDomain);
a_dftFunction(inputDevice, outputDevice);
hipMemcpy(a_output.m_data.local(), outputPtr, output_bytes,
hipMemcpyDeviceToHost);
}
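// DFTfunctionDevice is the host-side glue used by every test in this file:
// it carves one device allocation into an input and an output region, copies
// the host input up, wraps the device pointers in fftx::array_t views so the
// generated transform can run on them, and copies the result back into the
// host output array. Note that the buffer from hipMalloc is never freed, so
// each call leaks its staging allocation.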
template<int DIM, typename T_IN, typename T_OUT>
double test1DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> inA(a_inDomain);
fftx::array_t<DIM, T_IN> inB(a_inDomain);
fftx::array_t<DIM, T_IN> LCin(a_inDomain);
fftx::array_t<DIM, T_OUT> outA(a_outDomain);
fftx::array_t<DIM, T_OUT> outB(a_outDomain);
fftx::array_t<DIM, T_OUT> LCout(a_outDomain);
fftx::array_t<DIM, T_OUT> outLCin(a_outDomain);
double errtest1 = 0.;
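  // Linearity check: for random inputs inA, inB and random scalar pairs
  // (alphaIn, alphaOut), (betaIn, betaOut), verify that
  // DFT(alphaIn*inA + betaIn*inB) == alphaOut*DFT(inA) + betaOut*DFT(inB).
  // The scalar pairs are generated together so the identity also holds for
  // the real-to-complex and complex-to-real transforms (a real scalar on the
  // real side paired with the same value as a complex scalar on the other).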
for (int itn = 1; itn <= a_rounds; itn++)
{
T_IN alphaIn, betaIn;
T_OUT alphaOut, betaOut;
getUnifScalarPair(alphaIn, alphaOut);
getUnifScalarPair(betaIn, betaOut);
unifArray(inA);
unifArray(inB);
sumArrays(LCin, inA, inB, alphaIn, betaIn);
DFTfunctionDevice(a_dftFunction, inA, outA);
DFTfunctionDevice(a_dftFunction, inB, outB);
sumArrays(LCout, outA, outB, alphaOut, betaOut);
DFTfunctionDevice(a_dftFunction, LCin, outLCin);
double err = absMaxDiffArray(outLCin, LCout);
updateMax(errtest1, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD linearity test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 1 (linearity) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest1);
}
return errtest1;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2impulse1(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_verbosity)
{ // Unit impulse at low corner.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
fftx::array_t<DIM, T_OUT> all1out(a_outDomain);
setUnitImpulse(inImpulse, a_inDomain.lo);
setConstant(all1out, scalarVal<T_OUT>(1.));
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
double errtest2impulse1 = absMaxDiffArray(outImpulse, all1out);
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD unit impulse low corner test: max error %11.5e\n", DIM, errtest2impulse1);
}
return errtest2impulse1;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2impulsePlus(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_rounds,
int a_verbosity)
{ // Unit impulse at low corner.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
fftx::array_t<DIM, T_OUT> all1out(a_outDomain);
setUnitImpulse(inImpulse, a_inDomain.lo);
setConstant(all1out, scalarVal<T_OUT>(1.));
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inImpulseMinusRand(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outImpulseMinusRand(a_outDomain);
fftx::array_t<DIM, T_OUT> mysum(a_outDomain);
// Check that for random arrays inRand,
// fft(inRand) + fft(inImpulse - inRand) = fft(inImpulse) = all1out.
double errtest2impulsePlus = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
diffArrays(inImpulseMinusRand, inImpulse, inRand);
DFTfunctionDevice(a_dftFunction, inImpulseMinusRand, outImpulseMinusRand);
sumArrays(mysum, outRand, outImpulseMinusRand);
double err = absMaxDiffArray(mysum, all1out);
updateMax(errtest2impulsePlus, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random + unit impulse low corner test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD unit impulse low corner test in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2impulsePlus);
}
return errtest2impulsePlus;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2constant(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_verbosity)
{ // Check that constant maps back to unit impulse at low corner.
fftx::array_t<DIM, T_IN> all1in(a_inDomain);
setConstant(all1in, scalarVal<T_IN>(1.));
fftx::array_t<DIM, T_OUT> magImpulse(a_outDomain);
size_t npts = 1;
for (int d = 0; d < DIM; d++)
{
npts *= a_fullExtents[d];
}
T_OUT mag = scalarVal<T_OUT>(npts * 1.);
setUnitImpulse(magImpulse, a_outDomain.lo, mag);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
DFTfunctionDevice(a_dftFunction, all1in, outImpulse);
double errtest2constant = absMaxDiffArray(outImpulse, magImpulse);
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD constant test: max error %11.5e\n", DIM, errtest2constant);
}
return errtest2constant;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2constantPlus(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> all1in(a_inDomain);
setConstant(all1in, scalarVal<T_IN>(1.));
fftx::array_t<DIM, T_OUT> magImpulse(a_outDomain);
size_t npts = 1;
for (int d = 0; d < DIM; d++)
{
npts *= a_fullExtents[d];
}
T_OUT mag = scalarVal<T_OUT>(npts * 1.);
setUnitImpulse(magImpulse, a_outDomain.lo, mag);
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inConstantMinusRand(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outConstantMinusRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outSum(a_outDomain);
// Check that for random arrays inRand,
// fft(inRand) + fft(all1 - inRand) = fft(all1) = magImpulse.
double errtest2constantPlus = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
diffArrays(inConstantMinusRand, all1in, inRand);
DFTfunctionDevice(a_dftFunction, inConstantMinusRand, outConstantMinusRand);
sumArrays(outSum, outRand, outConstantMinusRand);
double err = absMaxDiffArray(outSum, magImpulse);
updateMax(errtest2constantPlus, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random + constant test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD random + constant test in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2constantPlus);
}
return errtest2constantPlus;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2impulseRandom(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
template<int DIM, typename T_IN>
double test2impulseRandom(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, std::complex<double>>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
// Check unit impulse at random position.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, std::complex<double>> outImpulse(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outCheck(a_outDomain);
double errtest2impulseRandom = 0.;
fftx::point_t<DIM> fullExtents = a_inDomain.extents();
for (int itn = 1; itn <= a_rounds; itn++)
{
fftx::point_t<DIM> rpoint = unifPoint<DIM>();
setUnitImpulse(inImpulse, rpoint);
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
// Recall a_inDomain is whole domain, but a_outDomain may be truncated;
// waves defined on a_outDomain, but based on the full a_inDomain extents.
setProductWaves(outCheck, fullExtents, rpoint, a_sign);
double err = absMaxDiffArray(outImpulse, outCheck);
updateMax(errtest2impulseRandom, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random impulse test round %d max error %11.5e\n", DIM, itn, err);
}
}
return errtest2impulseRandom;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_sign,
int a_rounds,
int a_verbosity)
{
double errtest2 = 0.;
updateMax(errtest2,
test2impulse1(a_dftFunction, a_inDomain, a_outDomain,
a_verbosity));
updateMax(errtest2,
test2impulsePlus(a_dftFunction, a_inDomain, a_outDomain,
a_rounds, a_verbosity));
updateMax(errtest2,
test2constant(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_verbosity));
updateMax(errtest2,
test2constantPlus(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_rounds, a_verbosity));
updateMax(errtest2,
test2impulseRandom(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 2 (impulses) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2);
}
return errtest2;
}
template<int DIM, typename T_IN, typename T_OUT>
double test3time(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
template<int DIM, typename T_IN>
double test3time(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, std::complex<double>>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inRandRot(a_inDomain);
fftx::array_t<DIM, std::complex<double>> outRand(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outRandRot(a_outDomain);
fftx::array_t<DIM, std::complex<double>> rotator(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outRandRotMult(a_outDomain);
double errtest3timeDim[DIM];
double errtest3time = 0.;
for (int d = 0; d < DIM; d++)
{
errtest3timeDim[d] = 0.;
setRotator(rotator, a_inDomain, d, -a_sign); // +1 for MDDFT, -1 for IMDDFT, -1 for PRDFT
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
// time-shift test in dimension d
rotate(inRandRot, inRand, d, 1); // +1 for MDDFT, +1 for IMDDFT, +1 for PRDFT
DFTfunctionDevice(a_dftFunction, inRand, outRand);
DFTfunctionDevice(a_dftFunction, inRandRot, outRandRot);
productArrays(outRandRotMult, outRandRot, rotator);
double err = absMaxDiffArray(outRandRotMult, outRand);
updateMax(errtest3timeDim[d], err);
updateMax(errtest3time, errtest3timeDim[d]);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD dim %d time-shift test %d max error %11.5e\n", DIM, d, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD dim %d time-shift test in %d rounds: max error %11.5e\n", DIM, d, a_rounds, errtest3timeDim[d]);
}
}
return errtest3time;
}
template<int DIM, typename T_IN, typename T_OUT>
double test3frequency(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
template<int DIM, typename T_OUT>
double test3frequency(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, std::complex<double>> inRand(a_inDomain);
fftx::array_t<DIM, std::complex<double>> inRandMult(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outRandMult(a_outDomain);
fftx::array_t<DIM, std::complex<double>> rotatorUp(a_inDomain);
fftx::array_t<DIM, T_OUT> outRandMultRot(a_outDomain);
double errtest3frequencyDim[DIM];
double errtest3frequency = 0.;
for (int d = 0; d < DIM; d++)
{
// frequency-shift test in dimension d
errtest3frequencyDim[d] = 0.;
// Recall a_outDomain is whole domain, but a_inDomain may be truncated;
// rotatorUp is defined on a_inDomain, but based on the full a_outDomain.
setRotator(rotatorUp, a_outDomain, d, 1);
for (int itn = 1; itn <= a_rounds; itn++)
{
unifComplexArray(inRand);
productArrays(inRandMult, inRand, rotatorUp);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
DFTfunctionDevice(a_dftFunction, inRandMult, outRandMult);
rotate(outRandMultRot, outRandMult, d, a_sign);
double err = absMaxDiffArray(outRandMultRot, outRand);
updateMax(errtest3frequencyDim[d], err);
updateMax(errtest3frequency, errtest3frequencyDim[d]);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD dim %d frequency-shift test %d max error %11.5e\n", DIM, d, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD dim %d frequency-shift test in %d rounds: max error %11.5e\n", DIM, d, a_rounds, errtest3frequencyDim[d]);
}
}
return errtest3frequency;
}
template<int DIM, typename T_IN, typename T_OUT>
double test3DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
double errtest3 = 0.;
updateMax(errtest3,
test3time(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
updateMax(errtest3,
test3frequency(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 3 (shifts) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest3);
}
return errtest3;
}
template<int DIM, typename T_IN, typename T_OUT>
void verifyDFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_sign,
int a_rounds,
int a_verbosity)
{
double err = 0.;
updateMax(err,
test1DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_rounds, a_verbosity));
updateMax(err,
test2DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_sign, a_rounds, a_verbosity));
updateMax(err,
test3DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
printf("%dD test in %d rounds max error %11.5e\n", DIM, a_rounds, err);
}
template<int DIM>
void verifyDimension(fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
fftx::handle_t (a_mddft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_imddft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_prdft)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_iprdft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, double>&),
int a_verbosity)
{
// std::cout << "*******************************************" << std::endl;
// std::cout << DIM << "D domain = " << verify::domain1 << std::endl;
// std::cout << "*******************************************" << std::endl;
fftx::point_t<DIM> fullextents = a_domain.extents();
std::cout << "***** test " << DIM << "D MDDFT on complex "
<< a_domain << std::endl;
verifyDFTfunction(a_mddft, a_domain, a_domain, fullextents, -1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D IMDDFT on complex "
<< a_domain << std::endl;
verifyDFTfunction(a_imddft, a_domain, a_domain, fullextents, 1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D PRDFT from real "
<< a_domain << " to complex " << a_fdomain << std::endl;
verifyDFTfunction(a_prdft, a_domain, a_fdomain, fullextents, -1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D IPRDFT from complex "
<< a_fdomain << " to real " << a_domain << std::endl;
verifyDFTfunction(a_iprdft, a_fdomain, a_domain, fullextents, 1, a_rounds, a_verbosity);
}
int main(int argc, char* argv[])
{
// { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
printf("Usage: %s [verbosity=0] [rounds=20]\n", argv[0]);
int verbosity = 0;
int rounds = 20;
if (argc > 1)
{
verbosity = atoi(argv[1]);
if (argc > 2)
{
rounds = atoi(argv[2]);
}
}
printf("Running with verbosity %d, random %d rounds\n", verbosity, rounds);
/*
Set up random number generator.
*/
std::random_device rd;
generator = std::mt19937(rd());
unifRealDist = std::uniform_real_distribution<double>(-0.5, 0.5);
for (int d = 0; d < 3; d++)
{
unifInt[d] = std::uniform_int_distribution<int>(verify::domain3.lo[d],
verify::domain3.hi[d]);
}
/*
// printf("Call mddft1::init()\n");
mddft1::init();
// printf("Call imddft1::init()\n");
imddft1::init();
// printf("Call prdft1::init()\n");
prdft1::init();
// printf("Call iprdft1::init()\n");
iprdft1::init();
verifyDimension(verify::domain1, verify::fdomain1, rounds,
mddft1::transform, imddft1::transform,
prdft1::transform, iprdft1::transform,
verbosity);
mddft1::destroy();
imddft1::destroy();
prdft1::destroy();
iprdft1::destroy();
*/
/*
// printf("Call mddft2::init()\n");
mddft2::init();
// printf("Call imddft2::init()\n");
imddft2::init();
// printf("Call prdft2::init()\n");
prdft2::init();
// printf("Call iprdft2::init()\n");
iprdft2::init();
verifyDimension(verify::domain2, verify::fdomain2, rounds,
mddft2::transform, imddft2::transform,
prdft2::transform, iprdft2::transform,
verbosity);
mddft2::destroy();
imddft2::destroy();
prdft2::destroy();
iprdft2::destroy();
*/
// printf("Call mddft3::init()\n");
mddft3::init();
// printf("Call imddft3::init()\n");
imddft3::init();
// printf("Call prdft3::init()\n");
prdft3::init();
// printf("Call iprdft3::init()\n");
iprdft3::init();
verifyDimension(verify::domain3, verify::fdomain3, rounds,
mddft3::transform, imddft3::transform,
prdft3::transform, iprdft3::transform,
verbosity);
mddft3::destroy();
imddft3::destroy();
prdft3::destroy();
iprdft3::destroy();
printf("%s: All done, exiting\n", argv[0]);
return 0;
}
| da4950eedcbbb17d6e84d0f7c9a1c2efcee95831.cu | #include <cmath> // Without this, abs returns zero!
#include <random>
/*
#include "mddft1.fftx.codegen.hpp"
#include "imddft1.fftx.codegen.hpp"
#include "mddft2.fftx.codegen.hpp"
#include "imddft2.fftx.codegen.hpp"
*/
#include "mddft3.fftx.codegen.hpp"
#include "imddft3.fftx.codegen.hpp"
/*
#include "prdft1.fftx.codegen.hpp"
#include "iprdft1.fftx.codegen.hpp"
#include "prdft2.fftx.codegen.hpp"
#include "iprdft2.fftx.codegen.hpp"
*/
#include "prdft3.fftx.codegen.hpp"
#include "iprdft3.fftx.codegen.hpp"
#include "fftx3utilities.h"
#include "verify.h"
enum VerbosityLevel { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
// using namespace fftx;
std::mt19937 generator;
// unifRealDist is uniform over the reals in (-1/2, 1/2).
std::uniform_real_distribution<double> unifRealDist;
// unifInt[d] is uniform over the integers in domain.lo[d] : domain.hi[d]
std::uniform_int_distribution<int> unifInt[3];
// Return random point in domain.
template<int DIM>
fftx::point_t<DIM> unifPoint()
{
fftx::point_t<DIM> ret;
for (int d = 0; d < DIM; d++)
{
ret[d] = unifInt[d](generator);
}
return ret;
}
// Return random real number.
double unifReal()
{
return unifRealDist(generator);
}
// Return random complex number.
std::complex<double> unifComplex()
{
return std::complex<double>(unifReal(), unifReal());
}
inline void getUnifScalar(double& a_scalar)
{
a_scalar = unifReal();
}
inline void getUnifScalar(std::complex<double>& a_scalar)
{
a_scalar = unifComplex();
}
template<typename T>
inline T unifScalar()
{
T ret;
getUnifScalar(ret);
return ret;
}
template<typename T_IN, typename T_OUT>
void getUnifScalarPair(T_IN& a_scalarIn,
T_OUT& a_scalarOut);
void getUnifScalarPair(std::complex<double>& a_scalarIn,
std::complex<double>& a_scalarOut)
{
a_scalarIn = unifComplex();
a_scalarOut = a_scalarIn;
}
void getUnifScalarPair(double& a_scalarIn,
std::complex<double>& a_scalarOut)
{
a_scalarIn = unifReal();
a_scalarOut = std::complex<double>(a_scalarIn, 0.);
}
void getUnifScalarPair(std::complex<double>& a_scalarIn,
double& a_scalarOut)
{
a_scalarOut = unifReal();
a_scalarIn = std::complex<double>(a_scalarOut, 0.);
}
// Fill a_arr with real numbers distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifRealArray(fftx::array_t<DIM, double>& a_arr)
{
forall([](double(&v),
const fftx::point_t<DIM>& p)
{
v = unifReal();
}, a_arr);
}
// Fill a_arr with complex numbers with real and imaginary components distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifComplexArray(fftx::array_t<DIM, std::complex<double>>& a_arr)
{
forall([](std::complex<double>(&v),
const fftx::point_t<DIM>& p)
{
v = unifComplex();
}, a_arr);
}
template<int DIM, typename T>
void unifArray(fftx::array_t<DIM, T>& a_arr);
template<int DIM>
void unifArray(fftx::array_t<DIM, double>& a_arr)
{
unifRealArray(a_arr);
}
template<int DIM>
void unifArray(fftx::array_t<DIM, std::complex<double>>& a_arr)
{
unifComplexArray(a_arr);
}
// Set a_arr to a_scaling at point a_fixed, and 0 elsewhere.
template<int DIM, typename T>
void setUnitImpulse(fftx::array_t<DIM, T>& a_arr,
const fftx::point_t<DIM>& a_fixed,
T a_scaling = scalarVal<T>(1.) )
{
forall([a_fixed, a_scaling](T(&v),
const fftx::point_t<DIM>& p)
{
if (p == a_fixed)
{
v = a_scaling;
}
else
{
v = scalarVal<T>(0.);
}
}, a_arr);
}
// Set a_arr to product of waves from impulse at a_fixed.
template<int DIM>
void setProductWaves(fftx::array_t<DIM, std::complex<double>>& a_arr,
const fftx::point_t<DIM>& a_extent,
const fftx::point_t<DIM>& a_fixed,
int a_sign)
{
fftx::point_t<DIM> lo = a_arr.m_domain.lo;
std::complex<double> omega[DIM];
for (int d = 0; d < DIM; d++)
{
double th = (a_sign*2*(a_fixed[d] - lo[d])) * M_PI / (a_extent[d] * 1.);
omega[d] = std::complex<double>(cos(th), sin(th));
}
forall([omega, lo](std::complex<double>(&v),
const fftx::point_t<DIM>& p)
{
v = std::complex<double>(1., 0.);
for (int d = 0; d < DIM; d++)
{
v *= pow(omega[d], p[d] - lo[d]);
}
}, a_arr);
}
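/*
  setProductWaves builds exactly the transform of a unit impulse placed at
  a_fixed: a separable product of complex exponentials
      prod_d exp(i*a_sign*2*pi*(a_fixed[d]-lo[d])*(p[d]-lo[d])/a_extent[d]).
  Small 1-D sanity check: extent 4, impulse one sample above the low corner,
  sign -1 gives the values 1, -i, -1, i across the four samples.
  test2impulseRandom compares transform output against this array directly, and
  setRotator below reuses it to build the phase factors for the shift tests.
*/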
template<int DIM>
void setRotator(fftx::array_t<DIM, std::complex<double>>& a_arr,
const fftx::box_t<DIM>& a_dom,
int a_dim,
int a_shift)
{
fftx::point_t<DIM> lo = a_dom.lo;
fftx::point_t<DIM> hi = a_dom.hi;
fftx::point_t<DIM> fixed = lo;
if (a_shift > 0)
{
fixed[a_dim] = lo[a_dim] + a_shift;
}
else if (a_shift < 0)
{
fixed[a_dim] = hi[a_dim] - (a_shift+1);
}
// std::cout << "setRotator in " << a_dim << " shift " << a_shift
// << " waves " << fixed << " of " << a_arr.m_domain << std::endl;
setProductWaves(a_arr, a_dom.extents(), fixed, -1);
}
template<int DIM, typename T_IN, typename T_OUT>
void DFTfunctionDevice(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::array_t<DIM, T_IN>& a_input, // make this const?
fftx::array_t<DIM, T_OUT>& a_output)
{
auto inputDomain = a_input.m_domain;
auto outputDomain = a_output.m_domain;
auto input_size = inputDomain.size();
auto output_size = outputDomain.size();
auto input_bytes = input_size * sizeof(T_IN);
auto output_bytes = output_size * sizeof(T_OUT);
char* bufferPtr;
cudaMalloc(&bufferPtr, input_bytes + output_bytes);
T_IN* inputPtr = (T_IN*) bufferPtr;
bufferPtr += input_bytes;
T_OUT* outputPtr = (T_OUT*) bufferPtr;
cudaMemcpy(inputPtr, a_input.m_data.local(), input_bytes,
cudaMemcpyHostToDevice);
fftx::array_t<DIM, T_IN> inputDevice(fftx::global_ptr<T_IN>
(inputPtr, 0, 1), inputDomain);
fftx::array_t<DIM, T_OUT> outputDevice(fftx::global_ptr<T_OUT>
(outputPtr, 0, 1), outputDomain);
a_dftFunction(inputDevice, outputDevice);
cudaMemcpy(a_output.m_data.local(), outputPtr, output_bytes,
cudaMemcpyDeviceToHost);
}
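/*
  Typical call pattern for DFTfunctionDevice, sketched as a comment (array names
  here are illustrative; the transform handle is one of the generated entry
  points used later in this file, e.g. mddft3::transform):

      fftx::array_t<3, std::complex<double>> in(verify::domain3), out(verify::domain3);
      unifComplexArray(in);
      DFTfunctionDevice(mddft3::transform, in, out);

  The helper hides all device staging: a single cudaMalloc covering both buffers,
  a host-to-device copy of the input, the transform call on device-resident fftx
  arrays, and a device-to-host copy of the result. Note that the staging buffer
  is not cudaFree'd here, so each call leaves one device allocation live --
  tolerable for a short verification run.
*/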
template<int DIM, typename T_IN, typename T_OUT>
double test1DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> inA(a_inDomain);
fftx::array_t<DIM, T_IN> inB(a_inDomain);
fftx::array_t<DIM, T_IN> LCin(a_inDomain);
fftx::array_t<DIM, T_OUT> outA(a_outDomain);
fftx::array_t<DIM, T_OUT> outB(a_outDomain);
fftx::array_t<DIM, T_OUT> LCout(a_outDomain);
fftx::array_t<DIM, T_OUT> outLCin(a_outDomain);
double errtest1 = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
T_IN alphaIn, betaIn;
T_OUT alphaOut, betaOut;
getUnifScalarPair(alphaIn, alphaOut);
getUnifScalarPair(betaIn, betaOut);
unifArray(inA);
unifArray(inB);
sumArrays(LCin, inA, inB, alphaIn, betaIn);
DFTfunctionDevice(a_dftFunction, inA, outA);
DFTfunctionDevice(a_dftFunction, inB, outB);
sumArrays(LCout, outA, outB, alphaOut, betaOut);
DFTfunctionDevice(a_dftFunction, LCin, outLCin);
double err = absMaxDiffArray(outLCin, LCout);
updateMax(errtest1, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD linearity test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 1 (linearity) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest1);
}
return errtest1;
}
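/*
  The linearity check above relies on DFT(alpha*a + beta*b) = alpha*DFT(a) + beta*DFT(b).
  getUnifScalarPair keeps the two sides comparable when input and output types
  differ: for a real-to-complex transform the input coefficient is a real draw
  and the output coefficient is the same value promoted to complex, so the
  host-side linear combination of outputs matches the transform of the combined
  input.
*/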
template<int DIM, typename T_IN, typename T_OUT>
double test2impulse1(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_verbosity)
{ // Unit impulse at low corner.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
fftx::array_t<DIM, T_OUT> all1out(a_outDomain);
setUnitImpulse(inImpulse, a_inDomain.lo);
setConstant(all1out, scalarVal<T_OUT>(1.));
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
double errtest2impulse1 = absMaxDiffArray(outImpulse, all1out);
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD unit impulse low corner test: max error %11.5e\n", DIM, errtest2impulse1);
}
return errtest2impulse1;
}
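/*
  Identity behind test2impulse1: a unit impulse at the low corner transforms to
  the constant 1 in every output bin, for either sign convention, since
      sum_x delta_lo(x) * exp(+/- 2*pi*i*k.(x-lo)/N) = 1   for all k.
  Hence the comparison of outImpulse against all1out.
*/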
template<int DIM, typename T_IN, typename T_OUT>
double test2impulsePlus(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
fftx::box_t<DIM> a_inDomain,
fftx::box_t<DIM> a_outDomain,
int a_rounds,
int a_verbosity)
{ // Unit impulse at low corner.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
fftx::array_t<DIM, T_OUT> all1out(a_outDomain);
setUnitImpulse(inImpulse, a_inDomain.lo);
setConstant(all1out, scalarVal<T_OUT>(1.));
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inImpulseMinusRand(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outImpulseMinusRand(a_outDomain);
fftx::array_t<DIM, T_OUT> mysum(a_outDomain);
// Check that for random arrays inRand,
// fft(inRand) + fft(inImpulse - inRand) = fft(inImpulse) = all1out.
double errtest2impulsePlus = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
diffArrays(inImpulseMinusRand, inImpulse, inRand);
DFTfunctionDevice(a_dftFunction, inImpulseMinusRand, outImpulseMinusRand);
sumArrays(mysum, outRand, outImpulseMinusRand);
double err = absMaxDiffArray(mysum, all1out);
updateMax(errtest2impulsePlus, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random + unit impulse low corner test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD unit impulse low corner test in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2impulsePlus);
}
return errtest2impulsePlus;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2constant(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_verbosity)
{ // Check that constant maps back to unit impulse at low corner.
fftx::array_t<DIM, T_IN> all1in(a_inDomain);
setConstant(all1in, scalarVal<T_IN>(1.));
fftx::array_t<DIM, T_OUT> magImpulse(a_outDomain);
size_t npts = 1;
for (int d = 0; d < DIM; d++)
{
npts *= a_fullExtents[d];
}
T_OUT mag = scalarVal<T_OUT>(npts * 1.);
setUnitImpulse(magImpulse, a_outDomain.lo, mag);
fftx::array_t<DIM, T_OUT> outImpulse(a_outDomain);
DFTfunctionDevice(a_dftFunction, all1in, outImpulse);
double errtest2constant = absMaxDiffArray(outImpulse, magImpulse);
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD constant test: max error %11.5e\n", DIM, errtest2constant);
}
return errtest2constant;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2constantPlus(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> all1in(a_inDomain);
setConstant(all1in, scalarVal<T_IN>(1.));
fftx::array_t<DIM, T_OUT> magImpulse(a_outDomain);
size_t npts = 1;
for (int d = 0; d < DIM; d++)
{
npts *= a_fullExtents[d];
}
T_OUT mag = scalarVal<T_OUT>(npts * 1.);
setUnitImpulse(magImpulse, a_outDomain.lo, mag);
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inConstantMinusRand(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outConstantMinusRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outSum(a_outDomain);
// Check that for random arrays inRand,
// fft(inRand) + fft(all1 - inRand) = fft(all1) = magImpulse.
double errtest2constantPlus = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
diffArrays(inConstantMinusRand, all1in, inRand);
DFTfunctionDevice(a_dftFunction, inConstantMinusRand, outConstantMinusRand);
sumArrays(outSum, outRand, outConstantMinusRand);
double err = absMaxDiffArray(outSum, magImpulse);
updateMax(errtest2constantPlus, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random + constant test round %d max error %11.5e\n", DIM, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD random + constant test in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2constantPlus);
}
return errtest2constantPlus;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2impulseRandom(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
template<int DIM, typename T_IN>
double test2impulseRandom(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, std::complex<double>>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
// Check unit impulse at random position.
fftx::array_t<DIM, T_IN> inImpulse(a_inDomain);
fftx::array_t<DIM, std::complex<double>> outImpulse(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outCheck(a_outDomain);
double errtest2impulseRandom = 0.;
fftx::point_t<DIM> fullExtents = a_inDomain.extents();
for (int itn = 1; itn <= a_rounds; itn++)
{
fftx::point_t<DIM> rpoint = unifPoint<DIM>();
setUnitImpulse(inImpulse, rpoint);
DFTfunctionDevice(a_dftFunction, inImpulse, outImpulse);
// Recall a_inDomain is whole domain, but a_outDomain may be truncated;
// waves defined on a_outDomain, but based on the full a_inDomain extents.
setProductWaves(outCheck, fullExtents, rpoint, a_sign);
double err = absMaxDiffArray(outImpulse, outCheck);
updateMax(errtest2impulseRandom, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random impulse test round %d max error %11.5e\n", DIM, itn, err);
}
}
return errtest2impulseRandom;
}
template<int DIM, typename T_IN, typename T_OUT>
double test2DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_sign,
int a_rounds,
int a_verbosity)
{
double errtest2 = 0.;
updateMax(errtest2,
test2impulse1(a_dftFunction, a_inDomain, a_outDomain,
a_verbosity));
updateMax(errtest2,
test2impulsePlus(a_dftFunction, a_inDomain, a_outDomain,
a_rounds, a_verbosity));
updateMax(errtest2,
test2constant(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_verbosity));
updateMax(errtest2,
test2constantPlus(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_rounds, a_verbosity));
updateMax(errtest2,
test2impulseRandom(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 2 (impulses) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest2);
}
return errtest2;
}
template<int DIM, typename T_IN, typename T_OUT>
double test3time(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
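/*
  This unspecialized overload (and the analogous catch-all overloads of
  test3frequency and test2impulseRandom) intentionally returns 0: the shift and
  impulse identities are only checked when the relevant side of the transform is
  complex, and overload resolution picks the more specific overload below in
  that case. For a complex-to-real transform, for example, the time-shift test
  is skipped here while the frequency-shift test still runs.
*/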
template<int DIM, typename T_IN>
double test3time(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, std::complex<double>>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, T_IN> inRand(a_inDomain);
fftx::array_t<DIM, T_IN> inRandRot(a_inDomain);
fftx::array_t<DIM, std::complex<double>> outRand(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outRandRot(a_outDomain);
fftx::array_t<DIM, std::complex<double>> rotator(a_outDomain);
fftx::array_t<DIM, std::complex<double>> outRandRotMult(a_outDomain);
double errtest3timeDim[DIM];
double errtest3time = 0.;
for (int d = 0; d < DIM; d++)
{
errtest3timeDim[d] = 0.;
setRotator(rotator, a_inDomain, d, -a_sign); // +1 for MDDFT, -1 for IMDDFT, -1 for PRDFT
for (int itn = 1; itn <= a_rounds; itn++)
{
unifArray(inRand);
// time-shift test in dimension d
rotate(inRandRot, inRand, d, 1); // +1 for MDDFT, +1 for IMDDFT, +1 for PRDFT
DFTfunctionDevice(a_dftFunction, inRand, outRand);
DFTfunctionDevice(a_dftFunction, inRandRot, outRandRot);
productArrays(outRandRotMult, outRandRot, rotator);
double err = absMaxDiffArray(outRandRotMult, outRand);
updateMax(errtest3timeDim[d], err);
updateMax(errtest3time, errtest3timeDim[d]);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD dim %d time-shift test %d max error %11.5e\n", DIM, d, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD dim %d time-shift test in %d rounds: max error %11.5e\n", DIM, d, a_rounds, errtest3timeDim[d]);
}
}
return errtest3time;
}
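/*
  Identity behind the time-shift test above: circularly rotating the input by
  one sample along dimension d multiplies output bin k by a pure phase
  exp(-/+ 2*pi*i*k_d/N_d), with the direction set by the transform's sign. The
  rotator built from setRotator(rotator, a_inDomain, d, -a_sign) supplies exactly
  that phase, so outRandRot * rotator should reproduce outRand to roundoff.
*/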
template<int DIM, typename T_IN, typename T_OUT>
double test3frequency(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
return 0.;
}
template<int DIM, typename T_OUT>
double test3frequency(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
fftx::array_t<DIM, std::complex<double>> inRand(a_inDomain);
fftx::array_t<DIM, std::complex<double>> inRandMult(a_inDomain);
fftx::array_t<DIM, T_OUT> outRand(a_outDomain);
fftx::array_t<DIM, T_OUT> outRandMult(a_outDomain);
fftx::array_t<DIM, std::complex<double>> rotatorUp(a_inDomain);
fftx::array_t<DIM, T_OUT> outRandMultRot(a_outDomain);
double errtest3frequencyDim[DIM];
double errtest3frequency = 0.;
for (int d = 0; d < DIM; d++)
{
// frequency-shift test in dimension d
errtest3frequencyDim[d] = 0.;
// Recall a_outDomain is whole domain, but a_inDomain may be truncated;
// rotatorUp is defined on a_inDomain, but based on the full a_outDomain.
setRotator(rotatorUp, a_outDomain, d, 1);
for (int itn = 1; itn <= a_rounds; itn++)
{
unifComplexArray(inRand);
productArrays(inRandMult, inRand, rotatorUp);
DFTfunctionDevice(a_dftFunction, inRand, outRand);
DFTfunctionDevice(a_dftFunction, inRandMult, outRandMult);
rotate(outRandMultRot, outRandMult, d, a_sign);
double err = absMaxDiffArray(outRandMultRot, outRand);
updateMax(errtest3frequencyDim[d], err);
updateMax(errtest3frequency, errtest3frequencyDim[d]);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD dim %d frequency-shift test %d max error %11.5e\n", DIM, d, itn, err);
}
}
if (a_verbosity >= SHOW_SUBTESTS)
{
printf("%dD dim %d frequency-shift test in %d rounds: max error %11.5e\n", DIM, d, a_rounds, errtest3frequencyDim[d]);
}
}
return errtest3frequency;
}
template<int DIM, typename T_IN, typename T_OUT>
double test3DFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
int a_sign,
int a_rounds,
int a_verbosity)
{
double errtest3 = 0.;
updateMax(errtest3,
test3time(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
updateMax(errtest3,
test3frequency(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Test 3 (shifts) in %d rounds: max error %11.5e\n", DIM, a_rounds, errtest3);
}
return errtest3;
}
template<int DIM, typename T_IN, typename T_OUT>
void verifyDFTfunction(fftx::handle_t (a_dftFunction)
(fftx::array_t<DIM, T_IN>&,
fftx::array_t<DIM, T_OUT>&),
const fftx::box_t<DIM>& a_inDomain,
const fftx::box_t<DIM>& a_outDomain,
const fftx::point_t<DIM>& a_fullExtents,
int a_sign,
int a_rounds,
int a_verbosity)
{
double err = 0.;
updateMax(err,
test1DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_rounds, a_verbosity));
updateMax(err,
test2DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_fullExtents, a_sign, a_rounds, a_verbosity));
updateMax(err,
test3DFTfunction(a_dftFunction, a_inDomain, a_outDomain,
a_sign, a_rounds, a_verbosity));
printf("%dD test in %d rounds max error %11.5e\n", DIM, a_rounds, err);
}
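/*
  verifyDimension below drives all four generated transforms over one domain
  pair. For the complex-to-complex pair the input and output boxes coincide; for
  PRDFT/IPRDFT the complex side lives on a_fdomain, which verify.h is expected
  to define as the usual truncated frequency box for real-data transforms
  (roughly half extent plus one in the last dimension). That truncation is why
  the full real-domain extents are passed to the tests separately.
*/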
template<int DIM>
void verifyDimension(fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
fftx::handle_t (a_mddft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_imddft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_prdft)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, std::complex<double>>&),
fftx::handle_t (a_iprdft)
(fftx::array_t<DIM, std::complex<double>>&,
fftx::array_t<DIM, double>&),
int a_verbosity)
{
// std::cout << "*******************************************" << std::endl;
// std::cout << DIM << "D domain = " << verify::domain1 << std::endl;
// std::cout << "*******************************************" << std::endl;
fftx::point_t<DIM> fullextents = a_domain.extents();
std::cout << "***** test " << DIM << "D MDDFT on complex "
<< a_domain << std::endl;
verifyDFTfunction(a_mddft, a_domain, a_domain, fullextents, -1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D IMDDFT on complex "
<< a_domain << std::endl;
verifyDFTfunction(a_imddft, a_domain, a_domain, fullextents, 1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D PRDFT from real "
<< a_domain << " to complex " << a_fdomain << std::endl;
verifyDFTfunction(a_prdft, a_domain, a_fdomain, fullextents, -1, a_rounds, a_verbosity);
std::cout << "***** test " << DIM << "D IPRDFT from complex "
<< a_fdomain << " to real " << a_domain << std::endl;
verifyDFTfunction(a_iprdft, a_fdomain, a_domain, fullextents, 1, a_rounds, a_verbosity);
}
int main(int argc, char* argv[])
{
// { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
printf("Usage: %s [verbosity=0] [rounds=20]\n", argv[0]);
int verbosity = 0;
int rounds = 20;
if (argc > 1)
{
verbosity = atoi(argv[1]);
if (argc > 2)
{
rounds = atoi(argv[2]);
}
}
printf("Running with verbosity %d, random %d rounds\n", verbosity, rounds);
/*
Set up random number generator.
*/
std::random_device rd;
generator = std::mt19937(rd());
unifRealDist = std::uniform_real_distribution<double>(-0.5, 0.5);
for (int d = 0; d < 3; d++)
{
unifInt[d] = std::uniform_int_distribution<int>(verify::domain3.lo[d],
verify::domain3.hi[d]);
}
/*
// printf("Call mddft1::init()\n");
mddft1::init();
// printf("Call imddft1::init()\n");
imddft1::init();
// printf("Call prdft1::init()\n");
prdft1::init();
// printf("Call iprdft1::init()\n");
iprdft1::init();
verifyDimension(verify::domain1, verify::fdomain1, rounds,
mddft1::transform, imddft1::transform,
prdft1::transform, iprdft1::transform,
verbosity);
mddft1::destroy();
imddft1::destroy();
prdft1::destroy();
iprdft1::destroy();
*/
/*
// printf("Call mddft2::init()\n");
mddft2::init();
// printf("Call imddft2::init()\n");
imddft2::init();
// printf("Call prdft2::init()\n");
prdft2::init();
// printf("Call iprdft2::init()\n");
iprdft2::init();
verifyDimension(verify::domain2, verify::fdomain2, rounds,
mddft2::transform, imddft2::transform,
prdft2::transform, iprdft2::transform,
verbosity);
mddft2::destroy();
imddft2::destroy();
prdft2::destroy();
iprdft2::destroy();
*/
// printf("Call mddft3::init()\n");
mddft3::init();
// printf("Call imddft3::init()\n");
imddft3::init();
// printf("Call prdft3::init()\n");
prdft3::init();
// printf("Call iprdft3::init()\n");
iprdft3::init();
verifyDimension(verify::domain3, verify::fdomain3, rounds,
mddft3::transform, imddft3::transform,
prdft3::transform, iprdft3::transform,
verbosity);
mddft3::destroy();
imddft3::destroy();
prdft3::destroy();
iprdft3::destroy();
printf("%s: All done, exiting\n", argv[0]);
return 0;
}
|
d5dc01686b8904535a2f94172b5dfa2623d99fd0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This cuda routine allows one to compute the derivative wrt the point cloud 'x' of the derivative
* wrt 'x' of the expression
* K(x_i,y_j) @ b_j = sum_j f( |x_i-y_j|^2 ) b_j
*
*
* We're looking for the gradient with respect to x of
*
* < e, K(s,a,x,y,b) > = \sum_{i,j} f_s'( |x_i-y_j|^2 ) * < a_i, b_j > * 2 < e_i, x_i-y_j>,
*
* which is an N-by-D array g_i (i from 1 to N), where each line is equal to
*
* g_i = 2* \sum_j < a_i, b_j > * [ f_s'( |x_i-y_j|^2 ) * e_i
* + 2* < x_i-y_j, e_i > * f_s''( |x_i-y_j|^2 ) * (x_i-y_j) ]
*
* We will compute this sum over the index 'j' on the GPU, with 'one thread' = 'one index i'.
* Data will be stored as follows:
* - e_i in the thread memory
* - a_i in the thread memory
* - x_i in the thread memory
* - y_j in the SharedData
* - b_j in the SharedData (beta_j, really)
*
*
* Author : Jean Feydy, heavily based on the work of Joan Glaunès and Benjamin Charlier.
*
*/
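/*
 * For orientation only -- the actual kernel body lives in cuda_gradconv_xx.cx,
 * included below, and may differ in detail -- each thread i accumulates roughly:
 *
 *     g_i = 0
 *     for each shared-memory tile of (y_j, b_j):
 *         for each j in the tile:
 *             r2  = |x_i - y_j|^2
 *             sab = <a_i, b_j>
 *             sex = <e_i, x_i - y_j>
 *             g_i += 2*sab*( Fp(r2)*e_i + 2*sex*Fpp(r2)*(x_i - y_j) )
 *
 * with Fp/Fpp the first and second radial derivatives supplied through the
 * KernelFp/KernelFpp template parameters of KernelGpuGradConvXX below.
 */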
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "specific/radial_kernels/radial_kernels.h"
#include "specific/radial_kernels/cuda_gradconv_xx.cx"
//////////////////////////////////////////////////////
/////////// CPU -> GPU -> CPU routines ///////////////
//////////////////////////////////////////////////////
template < typename TYPE, KernelFun KernelFp , KernelFun KernelFpp >
int KernelGpuGradConvXX(TYPE ooSigma2, // 1 / sigma^2
TYPE* e_h, // N-by-D array (same as x)
TYPE* alpha_h, TYPE* x_h, // N-by-E, N-by-D arrays
TYPE* y_h, TYPE* beta_h, // M-by-D, M-by-E arrays
TYPE* gamma_h, // Output: N-by-D (same as x)
int dimPoint, int dimVect, int nx, int ny) { // D, E, N, M
// Data on the device.
TYPE* e_d;
TYPE* alpha_d;
TYPE* x_d;
TYPE* y_d;
TYPE* beta_d;
TYPE* gamma_d;
// Allocate arrays on device.
hipMalloc((void**)&e_d, sizeof(TYPE)*(nx*dimPoint));
hipMalloc((void**)&alpha_d, sizeof(TYPE)*(nx*dimVect ));
hipMalloc((void**)&x_d, sizeof(TYPE)*(nx*dimPoint));
hipMalloc((void**)&y_d, sizeof(TYPE)*(ny*dimPoint));
hipMalloc((void**)&beta_d, sizeof(TYPE)*(ny*dimVect ));
hipMalloc((void**)&gamma_d, sizeof(TYPE)*(nx*dimPoint)); // Output: N-by-D (same as x)
// Send data from host to device.
hipMemcpy(e_d, e_h, sizeof(TYPE)*(nx*dimPoint), hipMemcpyHostToDevice);
hipMemcpy(alpha_d, alpha_h, sizeof(TYPE)*(nx*dimVect ), hipMemcpyHostToDevice);
hipMemcpy(x_d, x_h, sizeof(TYPE)*(nx*dimPoint), hipMemcpyHostToDevice);
hipMemcpy(y_d, y_h, sizeof(TYPE)*(ny*dimPoint), hipMemcpyHostToDevice);
hipMemcpy(beta_d, beta_h, sizeof(TYPE)*(ny*dimVect ), hipMemcpyHostToDevice);
// compute on device.
dim3 blockSize;
blockSize.x = CUDA_BLOCK_SIZE; // number of threads in each block
dim3 gridSize;
gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1);
// Copy-paste templating, allowing us to pass the DIMPOINT and DIMVECT at compilation time :
if( dimPoint==1 && dimVect==1)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,1,1,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==2 && dimVect==1)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,2,1,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==3 && dimVect==1)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,3,1,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==4 && dimVect==1)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,4,1,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==2 && dimVect==2)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,2,2,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==3 && dimVect==3)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,3,3,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==4 && dimVect==4)
hipLaunchKernelGGL(( KernelGpuGradConvXXOnDevice<TYPE,4,4,KernelFp,KernelFpp>), dim3(gridSize),dim3(blockSize),blockSize.x*(dimPoint+dimVect)*sizeof(TYPE), 0,
ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else {
printf("GaussGpuGradConvXX error: dimensions of Gauss kernel not implemented in cuda\nYou probably just need a copy-paste in the conda_gradconv_xx.cu file !");
hipFree(e_d);
hipFree(alpha_d);
hipFree(x_d);
hipFree(y_d);
hipFree(beta_d);
hipFree(gamma_d);
return(-1);
}
// block until the device has completed
hipDeviceSynchronize();
// Send data from device to host.
hipMemcpy(gamma_h, gamma_d, sizeof(TYPE)*(nx*dimPoint),hipMemcpyDeviceToHost); // Output: N-by-D (same as x)
// Free memory.
hipFree(e_d);
hipFree(alpha_d);
hipFree(x_d);
hipFree(y_d);
hipFree(beta_d);
hipFree(gamma_d);
return 0;
}
// Couldn't find a clean way to give a name to an explicit instantiation :-(
extern "C" int GaussGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,GaussFp,GaussFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int CauchyGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,CauchyFp,CauchyFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int LaplaceGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,LaplaceFp,LaplaceFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int InverseMultiquadricGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,InverseMultiquadricFp,InverseMultiquadricFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
void ExitFcn(void) {
hipDeviceReset();
}
| d5dc01686b8904535a2f94172b5dfa2623d99fd0.cu | /*
* This cuda routine allows one to compute the derivative wrt the point cloud 'x' of the derivative
* wrt 'x' of the expression
* K(x_i,y_j) @ b_j = sum_j f( |x_i-y_j|^2 ) b_j
*
*
* We're looking for the gradient with respect to x of
*
* < e, K(s,a,x,y,b) > = \sum_{i,j} f_s'( |x_i-y_j|^2 ) * < a_i, b_j > * 2 < e_i, x_i-y_j>,
*
* which is an N-by-D array g_i (i from 1 to N), where each line is equal to
*
* g_i = 2* \sum_j < a_i, b_j > * [ f_s'( |x_i-y_j|^2 ) * e_i
* + 2* < x_i-y_j, e_i > * f_s''( |x_i-y_j|^2 ) * (x_i-y_j) ]
*
* We will compute this sum over the index 'j' on the GPU, with 'one thread' = 'one index i'.
* Data will be stored as follows:
* - e_i in the thread memory
* - a_i in the thread memory
* - x_i in the thread memory
* - y_j in the SharedData
* - b_j in the SharedData (beta_j, really)
*
*
* Author : Jean Feydy, heavily based on the work of Joan Glaunès and Benjamin Charlier.
*
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include "specific/radial_kernels/radial_kernels.h"
#include "specific/radial_kernels/cuda_gradconv_xx.cx"
//////////////////////////////////////////////////////
/////////// CPU -> GPU -> CPU routines ///////////////
//////////////////////////////////////////////////////
template < typename TYPE, KernelFun KernelFp , KernelFun KernelFpp >
int KernelGpuGradConvXX(TYPE ooSigma2, // 1 / sigma^2
TYPE* e_h, // N-by-D array (same as x)
TYPE* alpha_h, TYPE* x_h, // N-by-E, N-by-D arrays
TYPE* y_h, TYPE* beta_h, // M-by-D, M-by-E arrays
TYPE* gamma_h, // Output: N-by-D (same as x)
int dimPoint, int dimVect, int nx, int ny) { // D, E, N, M
// Data on the device.
TYPE* e_d;
TYPE* alpha_d;
TYPE* x_d;
TYPE* y_d;
TYPE* beta_d;
TYPE* gamma_d;
// Allocate arrays on device.
cudaMalloc((void**)&e_d, sizeof(TYPE)*(nx*dimPoint));
cudaMalloc((void**)&alpha_d, sizeof(TYPE)*(nx*dimVect ));
cudaMalloc((void**)&x_d, sizeof(TYPE)*(nx*dimPoint));
cudaMalloc((void**)&y_d, sizeof(TYPE)*(ny*dimPoint));
cudaMalloc((void**)&beta_d, sizeof(TYPE)*(ny*dimVect ));
cudaMalloc((void**)&gamma_d, sizeof(TYPE)*(nx*dimPoint)); // Output: N-by-D (same as x)
// Send data from host to device.
cudaMemcpy(e_d, e_h, sizeof(TYPE)*(nx*dimPoint), cudaMemcpyHostToDevice);
cudaMemcpy(alpha_d, alpha_h, sizeof(TYPE)*(nx*dimVect ), cudaMemcpyHostToDevice);
cudaMemcpy(x_d, x_h, sizeof(TYPE)*(nx*dimPoint), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y_h, sizeof(TYPE)*(ny*dimPoint), cudaMemcpyHostToDevice);
cudaMemcpy(beta_d, beta_h, sizeof(TYPE)*(ny*dimVect ), cudaMemcpyHostToDevice);
// compute on device.
dim3 blockSize;
blockSize.x = CUDA_BLOCK_SIZE; // number of threads in each block
dim3 gridSize;
gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1);
// Copy-paste templating, allowing us to pass the DIMPOINT and DIMVECT at compilation time :
if( dimPoint==1 && dimVect==1)
KernelGpuGradConvXXOnDevice<TYPE,1,1,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==2 && dimVect==1)
KernelGpuGradConvXXOnDevice<TYPE,2,1,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==3 && dimVect==1)
KernelGpuGradConvXXOnDevice<TYPE,3,1,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==4 && dimVect==1)
KernelGpuGradConvXXOnDevice<TYPE,4,1,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==2 && dimVect==2)
KernelGpuGradConvXXOnDevice<TYPE,2,2,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==3 && dimVect==3)
KernelGpuGradConvXXOnDevice<TYPE,3,3,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==4 && dimVect==4)
KernelGpuGradConvXXOnDevice<TYPE,4,4,KernelFp,KernelFpp><<<gridSize,blockSize,blockSize.x*(dimPoint+dimVect)*sizeof(TYPE)>>>
(ooSigma2, e_d, alpha_d, x_d, y_d, beta_d, gamma_d, nx, ny);
else {
printf("GaussGpuGradConvXX error: dimensions of Gauss kernel not implemented in cuda\nYou probably just need a copy-paste in the conda_gradconv_xx.cu file !");
cudaFree(e_d);
cudaFree(alpha_d);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(beta_d);
cudaFree(gamma_d);
return(-1);
}
// block until the device has completed
cudaDeviceSynchronize();
// Send data from device to host.
cudaMemcpy(gamma_h, gamma_d, sizeof(TYPE)*(nx*dimPoint),cudaMemcpyDeviceToHost); // Output: N-by-D (same as x)
// Free memory.
cudaFree(e_d);
cudaFree(alpha_d);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(beta_d);
cudaFree(gamma_d);
return 0;
}
// Couldn't find a clean way to give a name to an explicit instantiation :-(
extern "C" int GaussGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,GaussFp,GaussFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int CauchyGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,CauchyFp,CauchyFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int LaplaceGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,LaplaceFp,LaplaceFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
extern "C" int InverseMultiquadricGpuGradConvXX(__TYPE__ ooSigma2, __TYPE__* e_h, __TYPE__* alpha_h, __TYPE__* x_h, __TYPE__* y_h, __TYPE__* beta_h, __TYPE__* gamma_h, int dimPoint, int dimVect, int nx, int ny) {
return KernelGpuGradConvXX<__TYPE__,InverseMultiquadricFp,InverseMultiquadricFpp>(ooSigma2, e_h, alpha_h, x_h, y_h, beta_h, gamma_h, dimPoint, dimVect, nx, ny);
}
void ExitFcn(void) {
cudaDeviceReset();
}
|
4ef09fb36d387800bca5b6788f15317a38223b82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDA.hpp"
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{
PRD &prd = *getPRD<PRD>();
// set to constant white as background color
prd.pixelColor = vec3f(1.f);
}
extern "C" __global__ void __miss__shadow()
{
// we didn't hit anything, so the light is visible
vec3f &prd = *getPRD<vec3f>();
prd = vec3f(1.f);
}
extern "C" __global__ void __miss__phase()
{
// set to constant white as background color
PRD &prd = *getPRD<PRD>();
prd.pixelColor = vec3f(1.f);
} | 4ef09fb36d387800bca5b6788f15317a38223b82.cu | #include "CUDA.hpp"
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{
PRD &prd = *getPRD<PRD>();
// set to constant white as background color
prd.pixelColor = vec3f(1.f);
}
extern "C" __global__ void __miss__shadow()
{
// we didn't hit anything, so the light is visible
vec3f &prd = *getPRD<vec3f>();
prd = vec3f(1.f);
}
extern "C" __global__ void __miss__phase()
{
// set to constant white as background color
PRD &prd = *getPRD<PRD>();
prd.pixelColor = vec3f(1.f);
} |
7121fb9715668d776fd5e3f4eed3818aa83cd420.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* histogram.cu
*
* Microbenchmark for histogram, a statistical computation
* for image processing.
*
* Build with: nvcc -I ../chLib <options> histogram.cu ..\chLib\pgm.cu -lnpp -lpthread -lrt
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> histogram.cu ..\chLib\pgm.cu
*
* Requires: SM 1.1, for global atomics.
*
* Copyright (c) 2013, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <chThread.h>
#include <chTimer.h>
#include <chUtil.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
#include "histogramPerGrid.cuh"
#include "histogramPerBlock.cuh"
#include "histogramPerBlockOffset.cuh"
#include "histogramPerBlockReduce.cuh"
#include "histogramPerThread64.cuh"
#include "histogramPerThread4x64.cuh"
#include "histogramPerThread4x32.cuh"
#include "histogramNPP.cuh"
using namespace cudahandbook::threading;
workerThread *g_CPUThreadPool;
int g_numCPUCores;
int
bCompareHistograms( const unsigned int *p, const unsigned int *q, int N )
{
for ( int i = 0; i < N; i++ ) {
if ( p[i] != q[i] ) {
printf( "Histogram mismatch at %d: p[%d] == %d, q[%d] == %d\n", i, i, p[i], i, q[i] );
return 1;
}
}
return 0;
}
void
histCPU(
unsigned int *pHist,
int w, int h,
unsigned char *img, int imgPitch )
{
memset( pHist, 0, 256*sizeof(int) );
for ( int row = 0; row < h; row += 1 ) {
unsigned char *pi = img+row*imgPitch;
for ( int col = 0; col < w; col += 1 ) {
pHist[pi[col]] += 1;
}
}
}
float
hist1DCPU(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
memset( pHist, 0, 256*sizeof(int) );
for ( size_t i = 0; i < N; i++ ) {
pHist[ p[i] ] += 1;
}
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
struct histDelegation {
// input data for this thread only
unsigned char *pData;
size_t N;
// output histogram for this thread only
unsigned int privateHist[256];
};
static void
histWorkerThread( void *_p )
{
histDelegation *p = (histDelegation *) _p;
unsigned char *pData = p->pData;
memset( p->privateHist, 0, sizeof(p->privateHist) );
for (size_t i = 0; i < p->N; i++ ) {
p->privateHist[ pData[i] ] += 1;
}
}
float
hist1DCPU_threaded(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
histDelegation *phist = new histDelegation[ g_numCPUCores ];
size_t elementsPerCore = INTDIVIDE_CEILING( N, g_numCPUCores );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
phist[i].pData = p;
phist[i].N = (N) ? elementsPerCore : 0;
p += elementsPerCore;
N -= elementsPerCore;
g_CPUThreadPool[i].delegateAsynchronous(
histWorkerThread,
&phist[i] );
}
workerThread::waitAll( g_CPUThreadPool, g_numCPUCores );
memset( pHist, 0, 256*sizeof(unsigned int) );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
for ( int j = 0; j < 256; j++ ) {
pHist[j] += phist[i].privateHist[j];
}
}
delete[] phist;
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
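/*
  hist1DCPU_threaded privatizes the histogram per worker (256 counters each) and
  merges at the end, trading a small reduction for the elimination of all
  cross-thread contention on the bins. The GPU variants exercised below follow
  the same pattern at different granularities -- per grid, per block, or per
  thread, as their header names suggest -- with the merge done through shared
  memory and/or global atomics.
*/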
bool
TestHistogram(
double *pixelsPerSecond, // passback to report performance
const char *name,
const unsigned char *dptrBase, size_t dPitch,
int w, int h, // width and height of input
const unsigned int *hrefHist, // host reference data
dim3 threads,
void (*pfnHistogram)(
float *ms,
unsigned int *pHist,
const unsigned char *dptrBase, size_t dPitch,
int xUL, int yUL, int w, int h,
dim3 threads ),
int cIterations = 1,
const char *outputFilename = NULL
)
{
hipError_t status;
bool ret = false;
// Histogram for 8-bit grayscale image (2^8=256)
unsigned int hHist[256];
unsigned int *dHist = NULL;
float ms;
CUDART_CHECK( hipMalloc( (void **) &dHist, 256*sizeof(int) ) );
CUDART_CHECK( hipMemset( dHist, 0, 256*sizeof(int) ) );
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
CUDART_CHECK( hipMemcpy( hHist, dHist, sizeof(hHist), hipMemcpyDeviceToHost ) );
if ( bCompareHistograms( hHist, hrefHist, 256 ) ) {
printf( "%s: Histograms miscompare\n", name );
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
}
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
CUDART_CHECK( hipMemcpy( hHist, dHist, sizeof(hHist), hipMemcpyDeviceToHost ) );
if ( outputFilename ) {
FILE *f = fopen( outputFilename, "w" );
if ( ! f )
goto Error;
for ( int i = 0; i < 256; i++ ) {
fprintf( f, "%d\t", hHist[i] );
}
fprintf( f, "\n" );
fclose( f );
}
ret = true;
Error:
hipFree( dHist );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
hipError_t status;
int device = 0;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
unsigned int cpuHist[256];
unsigned int HostPitch, DevicePitch;
int w, h;
bool bTesla = false;
dim3 threads;
char *inputFilename = "coins.pgm";
char *outputFilename = NULL;
hipArray *pArrayImage = NULL;
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
printf( " --output <filename>: Write PGM of correlation values (0..255) to <filename>.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --random <numvalues>: overrides input filename and fills image with random data in the range [0..numvalues)\n" );
printf( " --stride <value>: specifies stride for random values (e.g., 2 means use even values only)\n" );
printf( " The random parameter must be in the range 1..256, and random/stride must be 256 or less.\n" );
printf( "\nDefault values are coins.pgm and no output file or padding\n" );
return 0;
}
{
if ( chCommandLineGet( &device, "device", argc, argv ) ) {
CUDART_CHECK( hipSetDevice( device ) );
}
}
CUDART_CHECK( hipSetDeviceFlags( hipDeviceMapHost ) );
CUDART_CHECK( hipDeviceSetCacheConfig( hipFuncCachePreferShared ) );
{
hipDeviceProp_t prop;
CUDART_CHECK( hipGetDeviceProperties( &prop, device ) );
printf( "Testing histogram on %s (%d SMs)\n", prop.name, prop.multiProcessorCount );
}
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 1024;//0;
int padHeight = 1024;//0;
int numvalues = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( chCommandLineGet( &numvalues, "random", argc, argv ) ) {
int stride = 1;
if ( chCommandLineGet( &stride, "stride", argc, argv ) ) {
if ( numvalues*stride > 256 ) {
printf( "stride*random must be <= 256\n" );
goto Error;
}
}
if ( 0==padWidth || 0==padHeight ) {
printf( "--random requires --padWidth and padHeight (to specify input size)\n" );
goto Error;
}
printf( "%d pixels, random, %d values with stride %d\n",
padWidth*padHeight, numvalues, stride );
w = padWidth;
h = padHeight;
hidata = (unsigned char *) malloc( w*h );
if ( ! hidata )
goto Error;
size_t dPitch;
CUDART_CHECK( hipMallocPitch( &didata, &dPitch, padWidth, padHeight ) );
DevicePitch = dPitch;
srand(time(NULL));
for ( int row = 0; row < h; row++ ) {
unsigned char *p = hidata+row*w;
for ( int col = 0; col < w; col++ ) {
int val = rand() % numvalues;
val *= stride;
p[col] = (unsigned char) val;
}
}
CUDART_CHECK( hipMemcpy2D( didata, DevicePitch, hidata, padWidth, padWidth, padHeight, hipMemcpyHostToDevice ) );
}
else {
if ( pgmLoad( inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) ) {
printf( "%s not found\n", inputFilename );
goto Error;
}
printf( "%d pixels, sourced from image file %s\n", w*h, inputFilename );
}
}
CUDART_CHECK( hipMallocArray( &pArrayImage, &desc, w, h ) );
CUDART_CHECK( hipMemcpyToArray( pArrayImage, 0, 0, hidata, w*h, hipMemcpyHostToDevice ) );
CUDART_CHECK( hipBindTextureToArray( texImage, pArrayImage ) );
{
hipDeviceProp_t prop;
CUDART_CHECK( hipGetDeviceProperties( &prop, 0 ) );
if ( prop.major < 2 ) {
bTesla = true;
}
}
histCPU( cpuHist, w, h, hidata, w );
{
unsigned int cpuHist2[256], cpuHist3[256];
float timeST = hist1DCPU( cpuHist2, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist2, 256 ) ) {
printf( "Linear and 2D histograms do not agree\n" );
exit(1);
}
printf("Single-threaded: %.2f Mpix/s\n", w*h/timeST/1e3 );
float timeMT = hist1DCPU_threaded( cpuHist3, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist3, 256 ) ) {
printf( "Multithreaded and 2D histograms do not agree\n" );
exit(1);
}
double pixPerms = w*h/timeMT;
printf( "Multithreaded (%d cores) is %.2fx faster (%.2f Mpix/s)\n",
g_numCPUCores,
timeST/timeMT,
pixPerms/1e3 );
}
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
if ( ! TestHistogram( &pixelsPerSecond, \
#baseName, \
didata, DevicePitch, \
w, h, \
cpuHist, \
threads, \
baseName, \
cIterations, outfile ) ) { \
printf( "Error\n" ); \
ret = 1; \
goto Error; \
} \
printf( "%s: %.2f Mpix/s\n", \
#baseName, pixelsPerSecond/1e6 ); \
}
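// Example: TEST_VECTOR( GPUhistogramPerBlock, false, 1, NULL ) expands to
// TestHistogram( &pixelsPerSecond, "GPUhistogramPerBlock", didata, DevicePitch,
// w, h, cpuHist, threads, GPUhistogramPerBlock, 1, NULL )
// plus the error check and the Mpix/s report; #baseName stringizes the launcher
// name, and the bPrintNeighborhood argument is currently unused by the macro.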
if ( w != DevicePitch ) {
printf( "1D versions only work if width and pitch are the same\n" );
}
threads = dim3( 32, 8, 1 );
TEST_VECTOR( GPUhistogramPerGrid, false, 1, NULL );
threads = dim3( 16, 4, 1 );
TEST_VECTOR( GPUhistogramPerBlock, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4x, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4xOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduce, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduceOffset, false, 1, NULL );
threads = dim3( 16, 4, 1 );
if ( ! bTesla ) {
TEST_VECTOR( GPUhistogramPerThread64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64_PeriodicMerge, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32_PeriodicMerge, false, 1, NULL );
}
TEST_VECTOR( GPUhistogramNPP, false, 1, NULL );
ret = 0;
Error:
free( hidata );
hipFree(didata);
hipFreeArray(pArrayImage);
return ret;
}
| 7121fb9715668d776fd5e3f4eed3818aa83cd420.cu | /*
*
* histogram.cu
*
* Microbenchmark for histogram, a statistical computation
* for image processing.
*
* Build with: nvcc -I ../chLib <options> histogram.cu ..\chLib\pgm.cu -lnpp -lpthread -lrt
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> histogram.cu ..\chLib\pgm.cu
*
* Requires: SM 1.1, for global atomics.
*
* Copyright (c) 2013, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <chThread.h>
#include <chTimer.h>
#include <chUtil.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
#include "histogramPerGrid.cuh"
#include "histogramPerBlock.cuh"
#include "histogramPerBlockOffset.cuh"
#include "histogramPerBlockReduce.cuh"
#include "histogramPerThread64.cuh"
#include "histogramPerThread4x64.cuh"
#include "histogramPerThread4x32.cuh"
#include "histogramNPP.cuh"
using namespace cudahandbook::threading;
workerThread *g_CPUThreadPool;
int g_numCPUCores;
int
bCompareHistograms( const unsigned int *p, const unsigned int *q, int N )
{
for ( int i = 0; i < N; i++ ) {
if ( p[i] != q[i] ) {
printf( "Histogram mismatch at %d: p[%d] == %d, q[%d] == %d\n", i, i, p[i], i, q[i] );
return 1;
}
}
return 0;
}
void
histCPU(
unsigned int *pHist,
int w, int h,
unsigned char *img, int imgPitch )
{
memset( pHist, 0, 256*sizeof(int) );
for ( int row = 0; row < h; row += 1 ) {
unsigned char *pi = img+row*imgPitch;
for ( int col = 0; col < w; col += 1 ) {
pHist[pi[col]] += 1;
}
}
}
float
hist1DCPU(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
memset( pHist, 0, 256*sizeof(int) );
for ( size_t i = 0; i < N; i++ ) {
pHist[ p[i] ] += 1;
}
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
struct histDelegation {
// input data for this thread only
unsigned char *pData;
size_t N;
// output histogram for this thread only
unsigned int privateHist[256];
};
static void
histWorkerThread( void *_p )
{
histDelegation *p = (histDelegation *) _p;
unsigned char *pData = p->pData;
memset( p->privateHist, 0, sizeof(p->privateHist) );
for (size_t i = 0; i < p->N; i++ ) {
p->privateHist[ pData[i] ] += 1;
}
}
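/*
 * Multithreaded CPU histogram: the input is split across g_numCPUCores worker
 * threads, each of which fills a private 256-bin histogram; the partial
 * histograms are then summed into pHist on the calling thread.
 */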
float
hist1DCPU_threaded(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
histDelegation *phist = new histDelegation[ g_numCPUCores ];
size_t elementsPerCore = INTDIVIDE_CEILING( N, g_numCPUCores );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
phist[i].pData = p;
        phist[i].N = (N < elementsPerCore) ? N : elementsPerCore;
        p += elementsPerCore;
        N -= phist[i].N;
g_CPUThreadPool[i].delegateAsynchronous(
histWorkerThread,
&phist[i] );
}
workerThread::waitAll( g_CPUThreadPool, g_numCPUCores );
memset( pHist, 0, 256*sizeof(unsigned int) );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
for ( int j = 0; j < 256; j++ ) {
pHist[j] += phist[i].privateHist[j];
}
}
delete[] phist;
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
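/*
 * Runs one GPU histogram implementation: computes the 256-bin histogram once
 * and checks it against the host reference, then times cIterations further
 * runs and reports throughput through *pixelsPerSecond. Optionally writes the
 * final histogram to outputFilename.
 */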
bool
TestHistogram(
double *pixelsPerSecond, // passback to report performance
const char *name,
const unsigned char *dptrBase, size_t dPitch,
int w, int h, // width and height of input
const unsigned int *hrefHist, // host reference data
dim3 threads,
void (*pfnHistogram)(
float *ms,
unsigned int *pHist,
const unsigned char *dptrBase, size_t dPitch,
int xUL, int yUL, int w, int h,
dim3 threads ),
int cIterations = 1,
const char *outputFilename = NULL
)
{
cudaError_t status;
bool ret = false;
// Histogram for 8-bit grayscale image (2^8=256)
unsigned int hHist[256];
unsigned int *dHist = NULL;
float ms;
CUDART_CHECK( cudaMalloc( (void **) &dHist, 256*sizeof(int) ) );
CUDART_CHECK( cudaMemset( dHist, 0, 256*sizeof(int) ) );
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
CUDART_CHECK( cudaMemcpy( hHist, dHist, sizeof(hHist), cudaMemcpyDeviceToHost ) );
if ( bCompareHistograms( hHist, hrefHist, 256 ) ) {
printf( "%s: Histograms miscompare\n", name );
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
}
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
CUDART_CHECK( cudaMemcpy( hHist, dHist, sizeof(hHist), cudaMemcpyDeviceToHost ) );
if ( outputFilename ) {
FILE *f = fopen( outputFilename, "w" );
if ( ! f )
goto Error;
for ( int i = 0; i < 256; i++ ) {
fprintf( f, "%d\t", hHist[i] );
}
fprintf( f, "\n" );
fclose( f );
}
ret = true;
Error:
cudaFree( dHist );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
cudaError_t status;
int device = 0;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
unsigned int cpuHist[256];
unsigned int HostPitch, DevicePitch;
int w, h;
bool bTesla = false;
dim3 threads;
char *inputFilename = "coins.pgm";
char *outputFilename = NULL;
cudaArray *pArrayImage = NULL;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
        printf( "    --output <filename>: write the computed 256-bin histogram to <filename> as tab-separated counts.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --random <numvalues>: overrides input filename and fills image with random data in the range [0..numvalues)\n" );
printf( " --stride <value>: specifies stride for random values (e.g., 2 means use even values only)\n" );
        printf( "     The random parameter must be in the range 1..256, and random*stride must be 256 or less.\n" );
printf( "\nDefault values are coins.pgm and no output file or padding\n" );
return 0;
}
{
if ( chCommandLineGet( &device, "device", argc, argv ) ) {
CUDART_CHECK( cudaSetDevice( device ) );
}
}
CUDART_CHECK( cudaSetDeviceFlags( cudaDeviceMapHost ) );
CUDART_CHECK( cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ) );
{
cudaDeviceProp prop;
CUDART_CHECK( cudaGetDeviceProperties( &prop, device ) );
printf( "Testing histogram on %s (%d SMs)\n", prop.name, prop.multiProcessorCount );
}
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 1024;//0;
int padHeight = 1024;//0;
int numvalues = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( chCommandLineGet( &numvalues, "random", argc, argv ) ) {
int stride = 1;
if ( chCommandLineGet( &stride, "stride", argc, argv ) ) {
if ( numvalues*stride > 256 ) {
printf( "stride*random must be <= 256\n" );
goto Error;
}
}
if ( 0==padWidth || 0==padHeight ) {
            printf( "--random requires --padWidth and --padHeight (to specify input size)\n" );
goto Error;
}
printf( "%d pixels, random, %d values with stride %d\n",
padWidth*padHeight, numvalues, stride );
w = padWidth;
        h = padHeight;
hidata = (unsigned char *) malloc( w*h );
if ( ! hidata )
goto Error;
size_t dPitch;
CUDART_CHECK( cudaMallocPitch( &didata, &dPitch, padWidth, padHeight ) );
DevicePitch = dPitch;
srand(time(NULL));
for ( int row = 0; row < h; row++ ) {
unsigned char *p = hidata+row*w;
for ( int col = 0; col < w; col++ ) {
int val = rand() % numvalues;
val *= stride;
p[col] = (unsigned char) val;
}
}
CUDART_CHECK( cudaMemcpy2D( didata, DevicePitch, hidata, padWidth, padWidth, padHeight, cudaMemcpyHostToDevice ) );
}
else {
if ( pgmLoad( inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) ) {
printf( "%s not found\n", inputFilename );
goto Error;
}
printf( "%d pixels, sourced from image file %s\n", w*h, inputFilename );
}
}
CUDART_CHECK( cudaMallocArray( &pArrayImage, &desc, w, h ) );
CUDART_CHECK( cudaMemcpyToArray( pArrayImage, 0, 0, hidata, w*h, cudaMemcpyHostToDevice ) );
CUDART_CHECK( cudaBindTextureToArray( texImage, pArrayImage ) );
{
cudaDeviceProp prop;
CUDART_CHECK( cudaGetDeviceProperties( &prop, 0 ) );
if ( prop.major < 2 ) {
bTesla = true;
}
}
histCPU( cpuHist, w, h, hidata, w );
{
unsigned int cpuHist2[256], cpuHist3[256];
float timeST = hist1DCPU( cpuHist2, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist2, 256 ) ) {
printf( "Linear and 2D histograms do not agree\n" );
exit(1);
}
printf("Single-threaded: %.2f Mpix/s\n", w*h/timeST/1e3 );
float timeMT = hist1DCPU_threaded( cpuHist3, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist3, 256 ) ) {
printf( "Multithreaded and 2D histograms do not agree\n" );
exit(1);
}
double pixPerms = w*h/timeMT;
printf( "Multithreaded (%d cores) is %.2fx faster (%.2f Mpix/s)\n",
g_numCPUCores,
timeST/timeMT,
pixPerms/1e3 );
}
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
if ( ! TestHistogram( &pixelsPerSecond, \
#baseName, \
didata, DevicePitch, \
w, h, \
cpuHist, \
threads, \
baseName, \
cIterations, outfile ) ) { \
printf( "Error\n" ); \
ret = 1; \
goto Error; \
} \
printf( "%s: %.2f Mpix/s\n", \
#baseName, pixelsPerSecond/1e6 ); \
}
if ( w != DevicePitch ) {
printf( "1D versions only work if width and pitch are the same\n" );
}
threads = dim3( 32, 8, 1 );
TEST_VECTOR( GPUhistogramPerGrid, false, 1, NULL );
threads = dim3( 16, 4, 1 );
TEST_VECTOR( GPUhistogramPerBlock, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4x, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4xOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduce, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduceOffset, false, 1, NULL );
threads = dim3( 16, 4, 1 );
if ( ! bTesla ) {
TEST_VECTOR( GPUhistogramPerThread64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64_PeriodicMerge, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32_PeriodicMerge, false, 1, NULL );
}
TEST_VECTOR( GPUhistogramNPP, false, 1, NULL );
ret = 0;
Error:
free( hidata );
cudaFree(didata);
cudaFreeArray(pArrayImage);
return ret;
}
|
ecbf8aed416cd35b79e6a0277b5fe59f1b8661fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#define BDIMX 16
/* This problem is simple once you understand that the initial values are the laneIDs (laneID = threadIdx.x % 32);
 * if a width is given it becomes % width. In this case we are using a width of 16 and our initial values are
 * 0...15, and from here we add the value held in the srcLane of the remaining half warp plus the offset of two.
 * The easy way to think about it is (laneID + 2) % 16 + initial value: (16 + 2) % 16 = 2 + 0 = 2 is the first result, and our last
 * is (31 + 2) % 16 = 1 + 15 = 16. If you change BDIMX to any multiple of two it becomes very clear.
*/
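/* Worked example (assuming BDIMX == 16 and the initial values 0..15 set in main()):
 * each thread ends up with value = i + ((i + 2) % 16), so the kernel prints
 * 2 4 6 8 10 12 14 16 18 20 22 24 26 28 14 16.
 */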
// function for checking the CUDA runtime API results.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
void printData(int *data, int isize)
{
for (int i = 0; i < isize; i++)
{
printf_s("%2d ", data[i]);
}
printf_s("\n");
}
__global__ void shfl_wrap(int *d_out, int *d_in)
{
int value = d_in[threadIdx.x];
value +=__shfl(value, threadIdx.x + 2, BDIMX);
d_out[threadIdx.x] = value;
}
int main(int argc, char **argv)
{
int dev = 0;
hipDeviceProp_t deviceProp;
checkCuda(hipGetDeviceProperties(&deviceProp, dev));
printf_s("> %s starting ", argv[0]);
printf_s("> on device %d: %s\n", dev, deviceProp.name);
checkCuda(hipSetDevice(dev));
	// set number of elements and the size of the arrays
int nElem = BDIMX;
int h_iData[BDIMX], h_oData[BDIMX];
for (int i = 0; i < nElem; i++) h_iData[i] = i;
	printf("initial data\t\t: ");
printData(h_iData, nElem);
// configure kernel
int block = BDIMX;
// allocate device memory
size_t nBytes = nElem * sizeof(int);
int *d_iData, *d_oData;
checkCuda(hipMalloc(&d_iData, nBytes));
checkCuda(hipMalloc(&d_oData, nBytes));
// run kernels
checkCuda(hipMemcpy(d_iData, h_iData, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( shfl_wrap) , dim3(1), dim3(block) , 0, 0, d_oData, d_iData);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(h_oData, d_oData, nBytes, hipMemcpyDeviceToHost));
printf_s("shfl wrap\t\t: ");
printData(h_oData, nElem);
// free memory
checkCuda(hipFree(d_iData));
checkCuda(hipFree(d_oData));
return EXIT_SUCCESS;
} | ecbf8aed416cd35b79e6a0277b5fe59f1b8661fd.cu | #include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#define BDIMX 16
/* This problem is simple once you understand that the initial values are the laneIDs (laneID = threadIdx.x % 32);
 * if a width is given it becomes % width. In this case we are using a width of 16 and our initial values are
 * 0...15, and from here we add the value held in the srcLane of the remaining half warp plus the offset of two.
 * The easy way to think about it is (laneID + 2) % 16 + initial value: (16 + 2) % 16 = 2 + 0 = 2 is the first result, and our last
 * is (31 + 2) % 16 = 1 + 15 = 16. If you change BDIMX to any multiple of two it becomes very clear.
*/
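/* Worked example (assuming BDIMX == 16 and the initial values 0..15 set in main()):
 * each thread ends up with value = i + ((i + 2) % 16), so the kernel prints
 * 2 4 6 8 10 12 14 16 18 20 22 24 26 28 14 16.
 */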
// function for checking the CUDA runtime API results.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
void printData(int *data, int isize)
{
for (int i = 0; i < isize; i++)
{
printf_s("%2d ", data[i]);
}
printf_s("\n");
}
__global__ void shfl_wrap(int *d_out, int *d_in)
{
int value = d_in[threadIdx.x];
value +=__shfl(value, threadIdx.x + 2, BDIMX);
d_out[threadIdx.x] = value;
}
int main(int argc, char **argv)
{
int dev = 0;
cudaDeviceProp deviceProp;
checkCuda(cudaGetDeviceProperties(&deviceProp, dev));
printf_s("> %s starting ", argv[0]);
printf_s("> on device %d: %s\n", dev, deviceProp.name);
checkCuda(cudaSetDevice(dev));
	// set number of elements and the size of the arrays
int nElem = BDIMX;
int h_iData[BDIMX], h_oData[BDIMX];
for (int i = 0; i < nElem; i++) h_iData[i] = i;
	printf("initial data\t\t: ");
printData(h_iData, nElem);
// configure kernel
int block = BDIMX;
// allocate device memory
size_t nBytes = nElem * sizeof(int);
int *d_iData, *d_oData;
checkCuda(cudaMalloc(&d_iData, nBytes));
checkCuda(cudaMalloc(&d_oData, nBytes));
// run kernels
checkCuda(cudaMemcpy(d_iData, h_iData, nBytes, cudaMemcpyHostToDevice));
shfl_wrap <<<1, block >>> (d_oData, d_iData);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(h_oData, d_oData, nBytes, cudaMemcpyDeviceToHost));
printf_s("shfl wrap\t\t: ");
printData(h_oData, nElem);
// free memory
checkCuda(cudaFree(d_iData));
checkCuda(cudaFree(d_oData));
return EXIT_SUCCESS;
} |
14a7b73a6ac08080044b7b2a967913c1199d5d6e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <algorithm>
#include <vector>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "db.h"
#include "search.h"
#ifdef PROFILE_CUDA
#define PROFILE_ON
#endif
#include "profile.h"
/* Use device 0 */
#define DEV_ID 0
typedef struct _ipoint_essence_t {
float vec[VEC_DIM] __attribute__((aligned (4)));
} __attribute__((packed)) ipoint_essence_t;
/* FIXME: The result is a bit different from CPU's */
__global__ void doSearchKernel (int shared_mem_size, int needle_idx,
ipoint_essence_t *needle, int needle_size,
ipoint_t *haystack, int haystack_size,
struct _interim *interim, int interim_size_local)
{
if (threadIdx.x + needle_idx >= needle_size)
return;
register float dist;//, temp;
int i, j, k;
int batch;
struct _interim *interim_local =
&(interim[(interim_size_local * blockIdx.x)
+ threadIdx.x + needle_idx]);
batch = haystack_size / gridDim.x;
int haystack_size_local = ((blockIdx.x + 1) * batch) > haystack_size ?
(haystack_size % batch) : batch;
/* Copy needle into local memory */
ipoint_essence_t needle_local;
for (i = 0; i < VEC_DIM; i++)
needle_local.vec[i] = needle[threadIdx.x + needle_idx].vec[i];
struct _interim interim_temp;
if (interim_local->lat_first == 0) {
interim_temp.dist_first = FLT_MAX;
interim_temp.dist_second = FLT_MAX;
}
else {
interim_temp.dist_first = interim_local->dist_first;
interim_temp.dist_second = interim_local->dist_second;
interim_temp.lat_first = interim_local->lat_first;
interim_temp.lng_first = interim_local->lng_first;
}
extern __shared__ ipoint_t haystack_shared[];
batch = shared_mem_size / sizeof(ipoint_t);
int iter;
for (k = 0; k <= (haystack_size_local / batch); k++) {
iter = ((k + 1) * batch) > haystack_size_local ?
(haystack_size_local % batch) : batch;
/* Copy haystack into shared memory */
if (threadIdx.x == 0)
for (i = 0; i < iter; i++)
haystack_shared[i] =
haystack[((haystack_size / gridDim.x) * blockIdx.x)
+ (k * batch) + i];
__syncthreads();
for (i = 0; i < iter; i++) {
dist = 0;
#if REG >= 128
register float hss[0x10];
for (j = 0; j < VEC_DIM; j += 0x10)
#else
register float hss[0x8];
for (j = 0; j < VEC_DIM; j += 0x8)
#endif
{
hss[0x0] = haystack_shared[i].vec[j];
hss[0x1] = haystack_shared[i].vec[j + 0x1];
hss[0x2] = haystack_shared[i].vec[j + 0x2];
hss[0x3] = haystack_shared[i].vec[j + 0x3];
hss[0x4] = haystack_shared[i].vec[j + 0x4];
hss[0x5] = haystack_shared[i].vec[j + 0x5];
hss[0x6] = haystack_shared[i].vec[j + 0x6];
hss[0x7] = haystack_shared[i].vec[j + 0x7];
#if REG >= 128
hss[0x8] = haystack_shared[i].vec[j + 0x8];
hss[0x9] = haystack_shared[i].vec[j + 0x9];
hss[0xA] = haystack_shared[i].vec[j + 0xA];
hss[0xB] = haystack_shared[i].vec[j + 0xB];
hss[0xC] = haystack_shared[i].vec[j + 0xC];
hss[0xD] = haystack_shared[i].vec[j + 0xD];
hss[0xE] = haystack_shared[i].vec[j + 0xE];
hss[0xF] = haystack_shared[i].vec[j + 0xF];
#endif
dist
+= ((needle_local.vec[j] - hss[0x0])
* (needle_local.vec[j] - hss[0x0]))
+ ((needle_local.vec[j + 0x1] - hss[0x1])
* (needle_local.vec[j + 0x1] - hss[0x1]))
+ ((needle_local.vec[j + 0x2] - hss[0x2])
* (needle_local.vec[j + 0x2] - hss[0x2]))
+ ((needle_local.vec[j + 0x3] - hss[0x3])
* (needle_local.vec[j + 0x3] - hss[0x3]))
+ ((needle_local.vec[j + 0x4] - hss[0x4])
* (needle_local.vec[j + 0x4] - hss[0x4]))
+ ((needle_local.vec[j + 0x5] - hss[0x5])
* (needle_local.vec[j + 0x5] - hss[0x5]))
+ ((needle_local.vec[j + 0x6] - hss[0x6])
* (needle_local.vec[j + 0x6] - hss[0x6]))
+ ((needle_local.vec[j + 0x7] - hss[0x7])
* (needle_local.vec[j + 0x7] - hss[0x7]));
#if REG >= 128
				dist += ((needle_local.vec[j + 0x8] - hss[0x8])
* (needle_local.vec[j + 0x8] - hss[0x8]))
+ ((needle_local.vec[j + 0x9] - hss[0x9])
* (needle_local.vec[j + 0x9] - hss[0x9]))
+ ((needle_local.vec[j + 0xA] - hss[0xA])
* (needle_local.vec[j + 0xA] - hss[0xA]))
+ ((needle_local.vec[j + 0xB] - hss[0xB])
* (needle_local.vec[j + 0xB] - hss[0xB]))
+ ((needle_local.vec[j + 0xC] - hss[0xC])
* (needle_local.vec[j + 0xC] - hss[0xC]))
+ ((needle_local.vec[j + 0xD] - hss[0xD])
* (needle_local.vec[j + 0xD] - hss[0xD]))
					+ ((needle_local.vec[j + 0xE] - hss[0xE])
* (needle_local.vec[j + 0xE] - hss[0xE]))
+ ((needle_local.vec[j + 0xF] - hss[0xF])
* (needle_local.vec[j + 0xF] - hss[0xF]));
#endif
}
if (dist < interim_temp.dist_first) {
interim_temp.lat_first =
haystack_shared[i].latitude;
interim_temp.lng_first =
haystack_shared[i].longitude;
interim_temp.dist_second =
interim_temp.dist_first;
interim_temp.dist_first = dist;
}
else if (dist < interim_temp.dist_second)
interim_temp.dist_second = dist;
}
}
interim_local->lat_first = interim_temp.lat_first;
interim_local->lng_first = interim_temp.lng_first;
interim_local->dist_first = interim_temp.dist_first;
interim_local->dist_second = interim_temp.dist_second;
return;
}
static int doSearch (IpVec *needle, ipoint_t *haystack, int haystack_size,
struct _interim *result, int result_size)
{
hipSetDevice(DEV_ID);
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
PROFILE_INIT();
PROFILE_START();
PROFILE_VAR(init_device);
PROFILE_VAR(copy_needle);
PROFILE_VAR(copy_haystack);
PROFILE_VAR(run_kernel);
PROFILE_VAR(copy_result);
PROFILE_VAR(post_processing);
int i, j, iter;
float dist;
ipoint_essence_t *needle_essence_h, *needle_essence_d;
ipoint_t *haystack_d;
struct _interim *interim_h, *interim_d;
int needle_size = (*needle).size();
hipError_t err;
PROFILE_FROM(init_device);
#ifdef PROFILE_CUDA
hipDeviceSynchronize();
#endif
PROFILE_TO(init_device);
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, DEV_ID);
hipStream_t *stream;
unsigned int stream_dim = (unsigned int)device_prop.multiProcessorCount;
unsigned int grid_dim = 1;
unsigned int block_dim =
MIN(needle_size, (unsigned int)device_prop.maxThreadsPerBlock);
block_dim = MIN(block_dim, (unsigned int)(device_prop.regsPerBlock / REG));
stream = (hipStream_t *)malloc(stream_dim * sizeof(hipStream_t));
for (i = 0; i < (int)stream_dim; i++)
hipStreamCreate(&stream[i]);
needle_essence_h = (ipoint_essence_t *)malloc(
needle_size * sizeof(ipoint_essence_t));
for (i = 0; i < needle_size; i++)
for (j = 0; j < VEC_DIM; j++)
needle_essence_h[i].vec[j] = (*needle)[i].descriptor[j];
PROFILE_FROM(copy_needle);
/* Copy needle to device */
if (hipMalloc((void **)&needle_essence_d,
needle_size * sizeof(ipoint_essence_t)) != hipSuccess) {
fprintf(stderr, "hipMalloc(needle_essence_d) failed\n");
return -1;
}
if (hipMemcpy(needle_essence_d, needle_essence_h,
needle_size * sizeof(ipoint_essence_t),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr,
"hipMemcpy(needle_essence_d, needle_essence_h) failed\n");
return -1;
}
#ifdef PROFILE_CUDA
hipDeviceSynchronize();
#endif
PROFILE_TO(copy_needle);
PROFILE_FROM(copy_haystack);
/* Copy haystack to device */
if (hipMalloc((void **)&haystack_d,
haystack_size * sizeof(ipoint_t)) != hipSuccess) {
fprintf(stderr, "hipMalloc(haystack_d) failed\n");
return -1;
}
#ifdef PROFILE_CUDA
hipDeviceSynchronize();
#endif
PROFILE_TO(copy_haystack);
/* Allocate memory for result
	 * TODO: The result that must be copied from the device is still on the
	 * order of hundreds of MB. Need to reduce it. */
if (hipMalloc((void **)&interim_d,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size) != hipSuccess) {
fprintf(stderr, "hipMalloc(interim_d) failed\n");
return -1;
}
if (hipMemset(interim_d, 0,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size) != hipSuccess) {
fprintf(stderr, "hipMemset(interim_d) failed\n");
return -1;
}
interim_h = (struct _interim *)malloc(
grid_dim * stream_dim * sizeof(struct _interim) * needle_size);
int stream_haystack_quota = haystack_size / stream_dim;
int stream_haystack_size;
for (j = 0; j < (int)stream_dim; j++) {
stream_haystack_size
= (j + 1) * stream_haystack_quota > haystack_size ?
(haystack_size % stream_haystack_quota) : stream_haystack_quota;
if (hipMemcpyAsync(
(ipoint_t *)(&haystack_d[stream_haystack_quota * j]),
(ipoint_t *)(&haystack[stream_haystack_quota * j]),
stream_haystack_size * sizeof(ipoint_t),
hipMemcpyHostToDevice, stream[j]) != hipSuccess) {
fprintf(stderr, "hipMemcpy(haystack_d, haystack) failed\n");
return -1;
}
}
for (i = 0; i <= needle_size / block_dim; i++) {
PROFILE_FROM(run_kernel);
/* Run CUDA kernel */
for (j = 0; j < (int)stream_dim; j++) {
stream_haystack_size
= (j + 1) * stream_haystack_quota > haystack_size ?
(haystack_size % stream_haystack_quota) : stream_haystack_quota;
hipLaunchKernelGGL(( doSearchKernel) ,
dim3( grid_dim),
(block_dim * (i + 1)) > needle_size ?
(needle_size % block_dim) : block_dim,
device_prop.sharedMemPerBlock,
stream[j] ,
device_prop.sharedMemPerBlock, i * block_dim,
needle_essence_d, needle_size,
(ipoint_t *)(&haystack_d[stream_haystack_quota * j]),
stream_haystack_size,
&interim_d[needle_size * j], needle_size);
}
#ifdef PROFILE_CUDA
hipDeviceSynchronize();
#endif
PROFILE_TO(run_kernel);
}
PROFILE_FROM(copy_result);
/* Copy result to host */
err = hipMemcpy(interim_h, interim_d,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size,
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "hipMemcpy(interim_h, interim_d): %s\n",
hipGetErrorString(err));
return -1;
}
#ifdef PROFILE_CUDA
hipDeviceSynchronize();
#endif
PROFILE_TO(copy_result);
PROFILE_FROM(post_processing);
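	/* Merge the per-stream partial results: for each needle descriptor keep the
	 * smallest and second-smallest distances seen by any stream, together with
	 * the coordinates of the best match. */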
iter = MIN((int)(*needle).size(), result_size);
for (i = 0; i < iter; i++) {
for (j = 0; j < (int)(grid_dim * stream_dim); j++) {
if (result[i].dist_first == FLT_MAX) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_first =
interim_h[(j * needle_size) + i].dist_first;
result[i].dist_second =
interim_h[(j * needle_size) + i].dist_second;
continue;
}
dist = interim_h[(j * needle_size) + i].dist_first;
if (dist < result[i].dist_first) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_second = result[i].dist_first;
result[i].dist_first = dist;
}
else if (dist < result[i].dist_second)
result[i].dist_second = dist;
dist = interim_h[(j * needle_size) + i].dist_second;
if (dist < result[i].dist_first) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_second = result[i].dist_first;
result[i].dist_first = dist;
}
else if (dist < result[i].dist_second)
result[i].dist_second = dist;
}
}
PROFILE_TO(post_processing);
free(needle_essence_h);
free(interim_h);
hipFree(needle_essence_d);
hipFree(haystack_d);
hipFree(interim_d);
PROFILE_END();
PROFILE_PRINT(stdout);
return 0;
}
void *search_gpu_main(void *arg)
{
worker_t *me = (worker_t *)arg;
msg_t msg;
while (1) {
if (me->dead)
break;
msg_read(&me->msgbox, &msg);
task_t *task = (task_t *)msg.content;
doSearch(&task->needle, task->haystack, task->haystack_size,
task->result, (task->needle).size());
me->isbusy = false;
db_release(me->db,
task->haystack, task->haystack_size * sizeof(ipoint_t));
pthread_cond_signal(me->cd_wait_worker);
}
return NULL;
}
| 14a7b73a6ac08080044b7b2a967913c1199d5d6e.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <algorithm>
#include <vector>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "db.h"
#include "search.h"
#ifdef PROFILE_CUDA
#define PROFILE_ON
#endif
#include "profile.h"
/* Use device 0 */
#define DEV_ID 0
typedef struct _ipoint_essence_t {
float vec[VEC_DIM] __attribute__((aligned (4)));
} __attribute__((packed)) ipoint_essence_t;
/* FIXME: The result is a bit different from CPU's */
__global__ void doSearchKernel (int shared_mem_size, int needle_idx,
ipoint_essence_t *needle, int needle_size,
ipoint_t *haystack, int haystack_size,
struct _interim *interim, int interim_size_local)
{
if (threadIdx.x + needle_idx >= needle_size)
return;
register float dist;//, temp;
int i, j, k;
int batch;
struct _interim *interim_local =
&(interim[(interim_size_local * blockIdx.x)
+ threadIdx.x + needle_idx]);
batch = haystack_size / gridDim.x;
int haystack_size_local = ((blockIdx.x + 1) * batch) > haystack_size ?
(haystack_size % batch) : batch;
/* Copy needle into local memory */
ipoint_essence_t needle_local;
for (i = 0; i < VEC_DIM; i++)
needle_local.vec[i] = needle[threadIdx.x + needle_idx].vec[i];
struct _interim interim_temp;
if (interim_local->lat_first == 0) {
interim_temp.dist_first = FLT_MAX;
interim_temp.dist_second = FLT_MAX;
}
else {
interim_temp.dist_first = interim_local->dist_first;
interim_temp.dist_second = interim_local->dist_second;
interim_temp.lat_first = interim_local->lat_first;
interim_temp.lng_first = interim_local->lng_first;
}
extern __shared__ ipoint_t haystack_shared[];
batch = shared_mem_size / sizeof(ipoint_t);
int iter;
for (k = 0; k <= (haystack_size_local / batch); k++) {
iter = ((k + 1) * batch) > haystack_size_local ?
(haystack_size_local % batch) : batch;
/* Copy haystack into shared memory */
if (threadIdx.x == 0)
for (i = 0; i < iter; i++)
haystack_shared[i] =
haystack[((haystack_size / gridDim.x) * blockIdx.x)
+ (k * batch) + i];
__syncthreads();
for (i = 0; i < iter; i++) {
dist = 0;
#if REG >= 128
register float hss[0x10];
for (j = 0; j < VEC_DIM; j += 0x10)
#else
register float hss[0x8];
for (j = 0; j < VEC_DIM; j += 0x8)
#endif
{
hss[0x0] = haystack_shared[i].vec[j];
hss[0x1] = haystack_shared[i].vec[j + 0x1];
hss[0x2] = haystack_shared[i].vec[j + 0x2];
hss[0x3] = haystack_shared[i].vec[j + 0x3];
hss[0x4] = haystack_shared[i].vec[j + 0x4];
hss[0x5] = haystack_shared[i].vec[j + 0x5];
hss[0x6] = haystack_shared[i].vec[j + 0x6];
hss[0x7] = haystack_shared[i].vec[j + 0x7];
#if REG >= 128
hss[0x8] = haystack_shared[i].vec[j + 0x8];
hss[0x9] = haystack_shared[i].vec[j + 0x9];
hss[0xA] = haystack_shared[i].vec[j + 0xA];
hss[0xB] = haystack_shared[i].vec[j + 0xB];
hss[0xC] = haystack_shared[i].vec[j + 0xC];
hss[0xD] = haystack_shared[i].vec[j + 0xD];
hss[0xE] = haystack_shared[i].vec[j + 0xE];
hss[0xF] = haystack_shared[i].vec[j + 0xF];
#endif
dist
+= ((needle_local.vec[j] - hss[0x0])
* (needle_local.vec[j] - hss[0x0]))
+ ((needle_local.vec[j + 0x1] - hss[0x1])
* (needle_local.vec[j + 0x1] - hss[0x1]))
+ ((needle_local.vec[j + 0x2] - hss[0x2])
* (needle_local.vec[j + 0x2] - hss[0x2]))
+ ((needle_local.vec[j + 0x3] - hss[0x3])
* (needle_local.vec[j + 0x3] - hss[0x3]))
+ ((needle_local.vec[j + 0x4] - hss[0x4])
* (needle_local.vec[j + 0x4] - hss[0x4]))
+ ((needle_local.vec[j + 0x5] - hss[0x5])
* (needle_local.vec[j + 0x5] - hss[0x5]))
+ ((needle_local.vec[j + 0x6] - hss[0x6])
* (needle_local.vec[j + 0x6] - hss[0x6]))
+ ((needle_local.vec[j + 0x7] - hss[0x7])
* (needle_local.vec[j + 0x7] - hss[0x7]));
#if REG >= 128
				dist += ((needle_local.vec[j + 0x8] - hss[0x8])
* (needle_local.vec[j + 0x8] - hss[0x8]))
+ ((needle_local.vec[j + 0x9] - hss[0x9])
* (needle_local.vec[j + 0x9] - hss[0x9]))
+ ((needle_local.vec[j + 0xA] - hss[0xA])
* (needle_local.vec[j + 0xA] - hss[0xA]))
+ ((needle_local.vec[j + 0xB] - hss[0xB])
* (needle_local.vec[j + 0xB] - hss[0xB]))
+ ((needle_local.vec[j + 0xC] - hss[0xC])
* (needle_local.vec[j + 0xC] - hss[0xC]))
+ ((needle_local.vec[j + 0xD] - hss[0xD])
* (needle_local.vec[j + 0xD] - hss[0xD]))
					+ ((needle_local.vec[j + 0xE] - hss[0xE])
* (needle_local.vec[j + 0xE] - hss[0xE]))
+ ((needle_local.vec[j + 0xF] - hss[0xF])
* (needle_local.vec[j + 0xF] - hss[0xF]));
#endif
}
if (dist < interim_temp.dist_first) {
interim_temp.lat_first =
haystack_shared[i].latitude;
interim_temp.lng_first =
haystack_shared[i].longitude;
interim_temp.dist_second =
interim_temp.dist_first;
interim_temp.dist_first = dist;
}
else if (dist < interim_temp.dist_second)
interim_temp.dist_second = dist;
}
}
interim_local->lat_first = interim_temp.lat_first;
interim_local->lng_first = interim_temp.lng_first;
interim_local->dist_first = interim_temp.dist_first;
interim_local->dist_second = interim_temp.dist_second;
return;
}
static int doSearch (IpVec *needle, ipoint_t *haystack, int haystack_size,
struct _interim *result, int result_size)
{
cudaSetDevice(DEV_ID);
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
PROFILE_INIT();
PROFILE_START();
PROFILE_VAR(init_device);
PROFILE_VAR(copy_needle);
PROFILE_VAR(copy_haystack);
PROFILE_VAR(run_kernel);
PROFILE_VAR(copy_result);
PROFILE_VAR(post_processing);
int i, j, iter;
float dist;
ipoint_essence_t *needle_essence_h, *needle_essence_d;
ipoint_t *haystack_d;
struct _interim *interim_h, *interim_d;
int needle_size = (*needle).size();
cudaError_t err;
PROFILE_FROM(init_device);
#ifdef PROFILE_CUDA
cudaDeviceSynchronize();
#endif
PROFILE_TO(init_device);
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, DEV_ID);
cudaStream_t *stream;
unsigned int stream_dim = (unsigned int)device_prop.multiProcessorCount;
unsigned int grid_dim = 1;
unsigned int block_dim =
MIN(needle_size, (unsigned int)device_prop.maxThreadsPerBlock);
block_dim = MIN(block_dim, (unsigned int)(device_prop.regsPerBlock / REG));
stream = (cudaStream_t *)malloc(stream_dim * sizeof(cudaStream_t));
for (i = 0; i < (int)stream_dim; i++)
cudaStreamCreate(&stream[i]);
needle_essence_h = (ipoint_essence_t *)malloc(
needle_size * sizeof(ipoint_essence_t));
for (i = 0; i < needle_size; i++)
for (j = 0; j < VEC_DIM; j++)
needle_essence_h[i].vec[j] = (*needle)[i].descriptor[j];
PROFILE_FROM(copy_needle);
/* Copy needle to device */
if (cudaMalloc((void **)&needle_essence_d,
needle_size * sizeof(ipoint_essence_t)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc(needle_essence_d) failed\n");
return -1;
}
if (cudaMemcpy(needle_essence_d, needle_essence_h,
needle_size * sizeof(ipoint_essence_t),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr,
"cudaMemcpy(needle_essence_d, needle_essence_h) failed\n");
return -1;
}
#ifdef PROFILE_CUDA
cudaDeviceSynchronize();
#endif
PROFILE_TO(copy_needle);
PROFILE_FROM(copy_haystack);
/* Copy haystack to device */
if (cudaMalloc((void **)&haystack_d,
haystack_size * sizeof(ipoint_t)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc(haystack_d) failed\n");
return -1;
}
#ifdef PROFILE_CUDA
cudaDeviceSynchronize();
#endif
PROFILE_TO(copy_haystack);
/* Allocate memory for result
	 * TODO: The result that must be copied from the device is still on the
	 * order of hundreds of MB. Need to reduce it. */
if (cudaMalloc((void **)&interim_d,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size) != cudaSuccess) {
fprintf(stderr, "cudaMalloc(interim_d) failed\n");
return -1;
}
if (cudaMemset(interim_d, 0,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size) != cudaSuccess) {
fprintf(stderr, "cudaMemset(interim_d) failed\n");
return -1;
}
interim_h = (struct _interim *)malloc(
grid_dim * stream_dim * sizeof(struct _interim) * needle_size);
int stream_haystack_quota = haystack_size / stream_dim;
int stream_haystack_size;
for (j = 0; j < (int)stream_dim; j++) {
stream_haystack_size
= (j + 1) * stream_haystack_quota > haystack_size ?
(haystack_size % stream_haystack_quota) : stream_haystack_quota;
if (cudaMemcpyAsync(
(ipoint_t *)(&haystack_d[stream_haystack_quota * j]),
(ipoint_t *)(&haystack[stream_haystack_quota * j]),
stream_haystack_size * sizeof(ipoint_t),
cudaMemcpyHostToDevice, stream[j]) != cudaSuccess) {
fprintf(stderr, "cudaMemcpy(haystack_d, haystack) failed\n");
return -1;
}
}
for (i = 0; i <= needle_size / block_dim; i++) {
PROFILE_FROM(run_kernel);
/* Run CUDA kernel */
for (j = 0; j < (int)stream_dim; j++) {
stream_haystack_size
= (j + 1) * stream_haystack_quota > haystack_size ?
(haystack_size % stream_haystack_quota) : stream_haystack_quota;
doSearchKernel <<<
grid_dim,
(block_dim * (i + 1)) > needle_size ?
(needle_size % block_dim) : block_dim,
device_prop.sharedMemPerBlock,
stream[j] >>>
(device_prop.sharedMemPerBlock, i * block_dim,
needle_essence_d, needle_size,
(ipoint_t *)(&haystack_d[stream_haystack_quota * j]),
stream_haystack_size,
&interim_d[needle_size * j], needle_size);
}
#ifdef PROFILE_CUDA
cudaDeviceSynchronize();
#endif
PROFILE_TO(run_kernel);
}
PROFILE_FROM(copy_result);
/* Copy result to host */
err = cudaMemcpy(interim_h, interim_d,
grid_dim * stream_dim * sizeof(struct _interim) * needle_size,
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMemcpy(interim_h, interim_d): %s\n",
cudaGetErrorString(err));
return -1;
}
#ifdef PROFILE_CUDA
cudaDeviceSynchronize();
#endif
PROFILE_TO(copy_result);
PROFILE_FROM(post_processing);
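	/* Merge the per-stream partial results: for each needle descriptor keep the
	 * smallest and second-smallest distances seen by any stream, together with
	 * the coordinates of the best match. */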
iter = MIN((int)(*needle).size(), result_size);
for (i = 0; i < iter; i++) {
for (j = 0; j < (int)(grid_dim * stream_dim); j++) {
if (result[i].dist_first == FLT_MAX) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_first =
interim_h[(j * needle_size) + i].dist_first;
result[i].dist_second =
interim_h[(j * needle_size) + i].dist_second;
continue;
}
dist = interim_h[(j * needle_size) + i].dist_first;
if (dist < result[i].dist_first) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_second = result[i].dist_first;
result[i].dist_first = dist;
}
else if (dist < result[i].dist_second)
result[i].dist_second = dist;
dist = interim_h[(j * needle_size) + i].dist_second;
if (dist < result[i].dist_first) {
result[i].lat_first =
interim_h[(j * needle_size) + i].lat_first;
result[i].lng_first =
interim_h[(j * needle_size) + i].lng_first;
result[i].dist_second = result[i].dist_first;
result[i].dist_first = dist;
}
else if (dist < result[i].dist_second)
result[i].dist_second = dist;
}
}
PROFILE_TO(post_processing);
free(needle_essence_h);
free(interim_h);
cudaFree(needle_essence_d);
cudaFree(haystack_d);
cudaFree(interim_d);
PROFILE_END();
PROFILE_PRINT(stdout);
return 0;
}
void *search_gpu_main(void *arg)
{
worker_t *me = (worker_t *)arg;
msg_t msg;
while (1) {
if (me->dead)
break;
msg_read(&me->msgbox, &msg);
task_t *task = (task_t *)msg.content;
doSearch(&task->needle, task->haystack, task->haystack_size,
task->result, (task->needle).size());
me->isbusy = false;
db_release(me->db,
task->haystack, task->haystack_size * sizeof(ipoint_t));
pthread_cond_signal(me->cd_wait_worker);
}
return NULL;
}
|
8ccc5e0459a6c0ef341ab6f5d11433f198adbec4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#define N 8
#define intswap(A,B) {int temp=A;A=B;B=temp;}
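/*
 * Bitonic sort of N elements in shared memory, one thread per element.
 * Outer loop: k is the length of the bitonic subsequences being merged
 * (2, 4, ..., N). Inner loop: j is the compare-exchange distance; each thread
 * pairs with partner i^j, and the test (i & k) picks ascending or descending
 * order, so the final pass (k == N) leaves the whole array ascending.
 */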
__global__ void sort(int *c)
{
__shared__ int shared[N];
int i = threadIdx.x;
shared[i] = c[i];
__syncthreads();
for(int k=2;k<=8;k*=2){
for(int j=k/2;j>0;j/=2){
int xorres = i^j;
if(xorres>i){
if((i&k) == 0){
if(shared[i]>shared[xorres])
intswap(shared[i],shared[xorres]);
}
else{
if(shared[i]<shared[xorres])
intswap(shared[i],shared[xorres]);
}
}
__syncthreads();
}
}
c[i] = shared[i];
}
int main(){
int a[N] = {6,1,2,5,3,4,7,9};
int b[N];
int n = N;
printf("ORIGINAL ARRAY : \n");
for(int i=0;i<n;i++)
printf("%d ",a[i]);
int *c;
hipMalloc((void**)&c,sizeof(int)*N);
hipMemcpy(c,&a,sizeof(int)*N,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sort), dim3(1),dim3(N) , 0, 0, c);
hipMemcpy(b,c,sizeof(int)*N,hipMemcpyDeviceToHost);
printf("\nSORTED ARRAY : \n");
for(int i=0;i<N;i++)
printf("%d ",b[i]);
printf("\n");
hipFree(c);
}
| 8ccc5e0459a6c0ef341ab6f5d11433f198adbec4.cu | #include<stdio.h>
#define N 8
#define intswap(A,B) {int temp=A;A=B;B=temp;}
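/*
 * Bitonic sort of N elements in shared memory, one thread per element.
 * Outer loop: k is the length of the bitonic subsequences being merged
 * (2, 4, ..., N). Inner loop: j is the compare-exchange distance; each thread
 * pairs with partner i^j, and the test (i & k) picks ascending or descending
 * order, so the final pass (k == N) leaves the whole array ascending.
 */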
__global__ void sort(int *c)
{
__shared__ int shared[N];
int i = threadIdx.x;
shared[i] = c[i];
__syncthreads();
for(int k=2;k<=8;k*=2){
for(int j=k/2;j>0;j/=2){
int xorres = i^j;
if(xorres>i){
if((i&k) == 0){
if(shared[i]>shared[xorres])
intswap(shared[i],shared[xorres]);
}
else{
if(shared[i]<shared[xorres])
intswap(shared[i],shared[xorres]);
}
}
__syncthreads();
}
}
c[i] = shared[i];
}
int main(){
int a[N] = {6,1,2,5,3,4,7,9};
int b[N];
int n = N;
printf("ORIGINAL ARRAY : \n");
for(int i=0;i<n;i++)
printf("%d ",a[i]);
int *c;
cudaMalloc((void**)&c,sizeof(int)*N);
cudaMemcpy(c,&a,sizeof(int)*N,cudaMemcpyHostToDevice);
sort<<< 1,N >>>(c);
cudaMemcpy(b,c,sizeof(int)*N,cudaMemcpyDeviceToHost);
printf("\nSORTED ARRAY : \n");
for(int i=0;i<N;i++)
printf("%d ",b[i]);
printf("\n");
cudaFree(c);
}
|
a60e7c67a700f80303df47985545b9faba3ff1b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
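// Uses a grid-stride loop so a launch of any grid size covers all n elements.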
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
// int i = blockIdx.x*blockDim.x+threadIdx.x;
//if(i<n){
//}
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = ( N + blockSize -1)/blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | a60e7c67a700f80303df47985545b9faba3ff1b6.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
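// Uses a grid-stride loop so a launch of any grid size covers all n elements.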
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
// int i = blockIdx.x*blockDim.x+threadIdx.x;
//if(i<n){
//}
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = ( N + blockSize -1)/blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
cb27b11a49b56471dad546e61dd285913f89f56f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_6.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    //the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
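// One explicit time step of the ten Tusscher 2004 epicardial model: computes the
// ionic currents and calcium dynamics, then returns the already-updated state
// (Rush-Larsen-style exponential updates for the gating variables) through rDY_.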
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| cb27b11a49b56471dad546e61dd285913f89f56f.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_6.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
    //the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
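// 17 entries, same ordering as the commented-out defaults above: V, m, h, j, Xr1, Xr2, Xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki.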
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
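// rDY already holds the updated state values computed by RHS_gpu (not raw time derivatives), so the loop below writes them back into the pitched state array.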
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
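// Reversal (Nernst) potentials for K+, Na+ and Ca2+, plus the mixed Na+/K+ reversal potential used by IKs.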
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
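// Free Ca2+ in the SR and cytosol is recovered analytically from the rapid-buffering quadratic, hence the sqrt expressions below.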
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
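// R and S gate parameters are only set when one of EPI/ENDO/MCELL is defined; none is defined in this file, so the macro is presumably supplied by the included model header.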
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
43e0a387a3b199abd6adff0eecdb410980e7e444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout_format/relayout_format_kern.cuh"
using namespace megdnn;
using namespace cuda;
using namespace relayout_format;
using namespace internal;
namespace {
template <int pack_w>
struct rwtype_helper;
template <>
struct rwtype_helper<2> {
using InnerDtype = char;
};
template <>
struct rwtype_helper<8> {
using InnerDtype = unsigned;
};
} // namespace
void relayout_format::relayout_format_cuda_nchw_nhwc(
const TensorND& src, const TensorND& dst, const hipStream_t& stream,
const float src_scale, const float dst_scale, const uint8_t src_zero_point,
const uint8_t dst_zero_point) {
auto&& stype = src.layout.dtype;
auto&& dtype = dst.layout.dtype;
auto& src_layout = src.layout;
auto& dst_layout = dst.layout;
int n = src.layout[0];
int ic = src.layout[1];
int h = src.layout[2];
int w = src.layout[3];
int w_pad = DIVUP(w, 2) * 2;
int hw = h * w_pad;
int n_stride_src = src_layout.stride[0];
int ic_stride = src_layout.stride[1];
int n_stride_dst = dst_layout.stride[0];
int hw_stride = dst_layout.stride[2];
static constexpr int chan_blk = 8;
static constexpr int pack_oc = 8;
int problem_size = n * DIVUP(ic, chan_blk) * hw;
int oc = dst.layout[3];
bool same_scale = src_scale == dst_scale;
bool padding = w % 2 != 0;
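// DISPATCH_RAW instantiates a specialized relayout kernel for one (padding, same-scale, vector-width, dtype) combination and, when the runtime parameters match, launches it and returns immediately.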
#define DISPATCH_RAW( \
_padding, _same_scale, _pack_w, _src_type, _dst_type, _src_c_type, \
_dst_c_type, _size_nbits) \
if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \
stype.enumv().ev == DTypeEnum::Ev::_src_type && \
dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \
using InnerDtype_ = typename rwtype_helper<_pack_w>::InnerDtype; \
using SrcIterator_ = TensorIteratorOverChannel< \
InnerDtype_, 1, chan_blk, _pack_w, _size_nbits>; \
using DstIterator_ = typename TensorIteratorPolicy< \
_padding, _dst_c_type, pack_oc, chan_blk, _pack_w, _size_nbits, \
LayoutType::NHWC>::TensorIterator; \
using CudaPostProcess_ = \
CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \
using Transpose_ = Translayout< \
_pack_w, chan_blk, InnerDtype_, dtype::_src_type, dtype::_dst_type, \
_same_scale>; \
using RelayoutProblem_ = RelayoutProblem< \
SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \
n_stride_src = n_stride_src * _size_nbits / (8 * sizeof(InnerDtype_)); \
ic_stride = ic_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \
n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(_dst_c_type)); \
hw_stride = hw_stride * _size_nbits / (8 * sizeof(_dst_c_type)); \
typename RelayoutProblem_::Param param{ \
SrcIterator_{(InnerDtype_*)src.raw_ptr(), ic_stride, ic, w, w_pad}, \
DstIterator_{(_dst_c_type*)dst.raw_ptr(), hw_stride, oc, w, w_pad}, \
CudaPostProcess_{ \
src_scale, src_zero_point, dst_scale, dst_zero_point}, \
n_stride_src, \
n_stride_dst, \
n, \
ic, \
hw, \
src_zero_point}; \
auto kernel = relayout_kern<RelayoutProblem_>; \
int nr_threads = query_blocksize_for_kernel(kernel); \
nr_threads = ::min(nr_threads, DIVUP(problem_size, _pack_w)); \
const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \
const dim3 thread_dim(nr_threads); \
return hipLaunchKernelGGL(( kernel), dim3(block_dim), dim3(thread_dim), 0, stream, param); \
}
#define DISPATCH_4BITS(_src_type, _dst_type) \
DISPATCH_RAW(true, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 2, _src_type, _dst_type, char, char, 4);
DISPATCH_4BITS(QuantizedS4, QuantizedS4);
DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm);
#undef DISPATCH_4BITS
#undef DISPATCH_RAW
megdnn_assert(
false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).",
stype.name(), dtype.name(), h, w);
}
void relayout_format::relayout_format_cuda_nhwc_nchw(
const TensorND& src, const TensorND& dst, const hipStream_t& stream,
const float src_scale, const float dst_scale, const uint8_t src_zero_point,
const uint8_t dst_zero_point) {
auto&& stype = src.layout.dtype;
auto&& dtype = dst.layout.dtype;
auto& src_layout = src.layout;
auto& dst_layout = dst.layout;
int n = src.layout[0];
int h = src.layout[1];
int w = src.layout[2];
int ic = src.layout[3];
int w_pad = DIVUP(w, 2) * 2;
int hw = h * w_pad;
int n_stride_src = src_layout.stride[0];
int hw_stride = src_layout.stride[2];
int n_stride_dst = dst_layout.stride[0];
int oc_stride = dst_layout.stride[1];
static constexpr int chan_blk = 8;
static constexpr int pack_oc = 8;
int problem_size = n * DIVUP(ic, chan_blk) * hw;
int oc = dst.layout[1];
bool same_scale = src_scale == dst_scale;
bool padding = w % 2 != 0;
#define DISPATCH_RAW( \
_padding, _same_scale, _pack_w, _src_type, _dst_type, _src_c_type, \
_dst_c_type, _size_nbits) \
if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \
stype.enumv().ev == DTypeEnum::Ev::_src_type && \
dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \
using SrcIterator_ = typename TensorIteratorPolicy< \
_padding, _src_c_type, pack_oc, chan_blk, _pack_w, _size_nbits, \
LayoutType::NHWC>::TensorIterator; \
using InnerDtype_ = typename rwtype_helper<_pack_w>::InnerDtype; \
using DstIterator_ = TensorIteratorOverChannel< \
InnerDtype_, 1, chan_blk, _pack_w, _size_nbits>; \
using CudaPostProcess_ = \
CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \
using Transpose_ = Translayout< \
chan_blk, _pack_w, _src_c_type, dtype::_src_type, dtype::_dst_type, \
_same_scale>; \
using RelayoutProblem_ = RelayoutProblem< \
SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \
n_stride_src = n_stride_src * _size_nbits / (8 * sizeof(_src_c_type)); \
hw_stride = hw_stride * _size_nbits / (8 * sizeof(_src_c_type)); \
n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(InnerDtype_)); \
oc_stride = oc_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \
typename RelayoutProblem_::Param param{ \
SrcIterator_{(_src_c_type*)src.raw_ptr(), hw_stride, ic, w, w_pad}, \
DstIterator_{(InnerDtype_*)dst.raw_ptr(), oc_stride, oc, w, w_pad}, \
CudaPostProcess_{ \
src_scale, src_zero_point, dst_scale, dst_zero_point}, \
n_stride_src, \
n_stride_dst, \
n, \
ic, \
hw, \
src_zero_point}; \
auto kernel = relayout_kern<RelayoutProblem_>; \
int nr_threads = query_blocksize_for_kernel(kernel); \
nr_threads = ::min(nr_threads, DIVUP(problem_size, _pack_w)); \
const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \
const dim3 thread_dim(nr_threads); \
return hipLaunchKernelGGL(( kernel), dim3(block_dim), dim3(thread_dim), 0, stream, param); \
}
#define DISPATCH_4BITS(_src_type, _dst_type) \
DISPATCH_RAW(true, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 2, _src_type, _dst_type, char, char, 4);
DISPATCH_4BITS(QuantizedS4, QuantizedS4);
DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm);
#undef DISPATCH_4BITS
#undef DISPATCH_RAW
megdnn_assert(
false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).",
stype.name(), dtype.name(), h, w);
}
| 43e0a387a3b199abd6adff0eecdb410980e7e444.cu | #include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout_format/relayout_format_kern.cuh"
using namespace megdnn;
using namespace cuda;
using namespace relayout_format;
using namespace internal;
namespace {
template <int pack_w>
struct rwtype_helper;
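// Maps the inner read/write vector width to a storage type: char (8-bit) for pack_w == 2, unsigned (32-bit) for pack_w == 8.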
template <>
struct rwtype_helper<2> {
using InnerDtype = char;
};
template <>
struct rwtype_helper<8> {
using InnerDtype = unsigned;
};
} // namespace
void relayout_format::relayout_format_cuda_nchw_nhwc(
const TensorND& src, const TensorND& dst, const cudaStream_t& stream,
const float src_scale, const float dst_scale, const uint8_t src_zero_point,
const uint8_t dst_zero_point) {
auto&& stype = src.layout.dtype;
auto&& dtype = dst.layout.dtype;
auto& src_layout = src.layout;
auto& dst_layout = dst.layout;
int n = src.layout[0];
int ic = src.layout[1];
int h = src.layout[2];
int w = src.layout[3];
int w_pad = DIVUP(w, 2) * 2;
int hw = h * w_pad;
int n_stride_src = src_layout.stride[0];
int ic_stride = src_layout.stride[1];
int n_stride_dst = dst_layout.stride[0];
int hw_stride = dst_layout.stride[2];
static constexpr int chan_blk = 8;
static constexpr int pack_oc = 8;
int problem_size = n * DIVUP(ic, chan_blk) * hw;
int oc = dst.layout[3];
bool same_scale = src_scale == dst_scale;
bool padding = w % 2 != 0;
#define DISPATCH_RAW( \
_padding, _same_scale, _pack_w, _src_type, _dst_type, _src_c_type, \
_dst_c_type, _size_nbits) \
if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \
stype.enumv().ev == DTypeEnum::Ev::_src_type && \
dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \
using InnerDtype_ = typename rwtype_helper<_pack_w>::InnerDtype; \
using SrcIterator_ = TensorIteratorOverChannel< \
InnerDtype_, 1, chan_blk, _pack_w, _size_nbits>; \
using DstIterator_ = typename TensorIteratorPolicy< \
_padding, _dst_c_type, pack_oc, chan_blk, _pack_w, _size_nbits, \
LayoutType::NHWC>::TensorIterator; \
using CudaPostProcess_ = \
CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \
using Transpose_ = Translayout< \
_pack_w, chan_blk, InnerDtype_, dtype::_src_type, dtype::_dst_type, \
_same_scale>; \
using RelayoutProblem_ = RelayoutProblem< \
SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \
n_stride_src = n_stride_src * _size_nbits / (8 * sizeof(InnerDtype_)); \
ic_stride = ic_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \
n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(_dst_c_type)); \
hw_stride = hw_stride * _size_nbits / (8 * sizeof(_dst_c_type)); \
typename RelayoutProblem_::Param param{ \
SrcIterator_{(InnerDtype_*)src.raw_ptr(), ic_stride, ic, w, w_pad}, \
DstIterator_{(_dst_c_type*)dst.raw_ptr(), hw_stride, oc, w, w_pad}, \
CudaPostProcess_{ \
src_scale, src_zero_point, dst_scale, dst_zero_point}, \
n_stride_src, \
n_stride_dst, \
n, \
ic, \
hw, \
src_zero_point}; \
auto kernel = relayout_kern<RelayoutProblem_>; \
int nr_threads = query_blocksize_for_kernel(kernel); \
nr_threads = std::min(nr_threads, DIVUP(problem_size, _pack_w)); \
const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \
const dim3 thread_dim(nr_threads); \
return kernel<<<block_dim, thread_dim, 0, stream>>>(param); \
}
#define DISPATCH_4BITS(_src_type, _dst_type) \
DISPATCH_RAW(true, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 2, _src_type, _dst_type, char, char, 4);
DISPATCH_4BITS(QuantizedS4, QuantizedS4);
DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm);
#undef DISPATCH_4BITS
#undef DISPATCH_RAW
megdnn_assert(
false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).",
stype.name(), dtype.name(), h, w);
}
void relayout_format::relayout_format_cuda_nhwc_nchw(
const TensorND& src, const TensorND& dst, const cudaStream_t& stream,
const float src_scale, const float dst_scale, const uint8_t src_zero_point,
const uint8_t dst_zero_point) {
auto&& stype = src.layout.dtype;
auto&& dtype = dst.layout.dtype;
auto& src_layout = src.layout;
auto& dst_layout = dst.layout;
int n = src.layout[0];
int h = src.layout[1];
int w = src.layout[2];
int ic = src.layout[3];
int w_pad = DIVUP(w, 2) * 2;
int hw = h * w_pad;
int n_stride_src = src_layout.stride[0];
int hw_stride = src_layout.stride[2];
int n_stride_dst = dst_layout.stride[0];
int oc_stride = dst_layout.stride[1];
static constexpr int chan_blk = 8;
static constexpr int pack_oc = 8;
int problem_size = n * DIVUP(ic, chan_blk) * hw;
int oc = dst.layout[1];
bool same_scale = src_scale == dst_scale;
bool padding = w % 2 != 0;
#define DISPATCH_RAW( \
_padding, _same_scale, _pack_w, _src_type, _dst_type, _src_c_type, \
_dst_c_type, _size_nbits) \
if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \
stype.enumv().ev == DTypeEnum::Ev::_src_type && \
dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \
using SrcIterator_ = typename TensorIteratorPolicy< \
_padding, _src_c_type, pack_oc, chan_blk, _pack_w, _size_nbits, \
LayoutType::NHWC>::TensorIterator; \
using InnerDtype_ = typename rwtype_helper<_pack_w>::InnerDtype; \
using DstIterator_ = TensorIteratorOverChannel< \
InnerDtype_, 1, chan_blk, _pack_w, _size_nbits>; \
using CudaPostProcess_ = \
CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \
using Transpose_ = Translayout< \
chan_blk, _pack_w, _src_c_type, dtype::_src_type, dtype::_dst_type, \
_same_scale>; \
using RelayoutProblem_ = RelayoutProblem< \
SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \
n_stride_src = n_stride_src * _size_nbits / (8 * sizeof(_src_c_type)); \
hw_stride = hw_stride * _size_nbits / (8 * sizeof(_src_c_type)); \
n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(InnerDtype_)); \
oc_stride = oc_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \
typename RelayoutProblem_::Param param{ \
SrcIterator_{(_src_c_type*)src.raw_ptr(), hw_stride, ic, w, w_pad}, \
DstIterator_{(InnerDtype_*)dst.raw_ptr(), oc_stride, oc, w, w_pad}, \
CudaPostProcess_{ \
src_scale, src_zero_point, dst_scale, dst_zero_point}, \
n_stride_src, \
n_stride_dst, \
n, \
ic, \
hw, \
src_zero_point}; \
auto kernel = relayout_kern<RelayoutProblem_>; \
int nr_threads = query_blocksize_for_kernel(kernel); \
nr_threads = std::min(nr_threads, DIVUP(problem_size, _pack_w)); \
const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \
const dim3 thread_dim(nr_threads); \
return kernel<<<block_dim, thread_dim, 0, stream>>>(param); \
}
#define DISPATCH_4BITS(_src_type, _dst_type) \
DISPATCH_RAW(true, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(true, false, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 8, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, true, 2, _src_type, _dst_type, char, char, 4); \
DISPATCH_RAW(false, false, 2, _src_type, _dst_type, char, char, 4);
DISPATCH_4BITS(QuantizedS4, QuantizedS4);
DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm);
#undef DISPATCH_4BITS
#undef DISPATCH_RAW
megdnn_assert(
false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).",
stype.name(), dtype.name(), h, w);
}
|
7c272de1d2b6b69db5ab1b23d9e80d7d529ef86f.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <internal_shared.hpp>
#include <opencv2/gpu/device/transform.hpp>
#include <opencv2/gpu/device/color.hpp>
#include <cvt_colot_internal.h>
namespace cv { namespace gpu { namespace device
{
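// The specializations below override the default launch configuration used by cv::gpu::device::transform (block dimensions and the smart_shift factor) for the listed conversion functors.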
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
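// Each macro below expands to a host-side wrapper that builds the conversion functor and dispatches it through cv::gpu::device::transform on the supplied stream.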
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \
typedef typename traits::functor_type::result_type dst_t; \
cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
}
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgra)
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | 7c272de1d2b6b69db5ab1b23d9e80d7d529ef86f.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <internal_shared.hpp>
#include <opencv2/gpu/device/transform.hpp>
#include <opencv2/gpu/device/color.hpp>
#include <cvt_colot_internal.h>
namespace cv { namespace gpu { namespace device
{
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \
typedef typename traits::functor_type::result_type dst_t; \
cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
}
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgra)
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
4603a779df4db51d09185a2693dd3d1ef681139e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
#define TILE_WIDTH 16
__global__ void sum_matrices(float *ma, float *mb, float *mc, int height, int width)
{
int row = blockIdx.y * blockDim.y + threadIdx.y; // row
int col = blockIdx.x * blockDim.x + threadIdx.x; // column
// it can also be done the other way round, i.e. row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < height && col < width) {
mc[row * width + col] = ma[row * width + col] + mb[row * width + col];
}
}
int main() {
// size
const size_t n = 1 << 6;
// set the block size for rows and columns, respectively
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
// determine the number of blocks for rows and columns, respectively
const dim3 num_blocks(n / block_size.x, n / block_size.y);
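// n (64) is an exact multiple of TILE_WIDTH, so no rounding up is needed here; in general use (n + block_size.x - 1) / block_size.x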
// there is no dim2 type in CUDA
// allocate host memory
float *host_a = 0, *host_b = 0, *host_c = 0;
host_a = (float *) malloc(n * n * sizeof(float));
host_b = (float *) malloc(n * n * sizeof(float));
host_c = (float *) malloc(n * n * sizeof(float));
for (int i = 0; i < n * n; i++) {
host_a[i] = 2;
host_b[i] = 4;
host_c[i] = 0;
}
// allocate device memory
float *device_a = 0, *device_b = 0, *device_c = 0;
hipMalloc((void**)&device_a, sizeof(float) * n * n);
hipMalloc((void**)&device_b, sizeof(float) * n * n);
hipMalloc((void**)&device_c, sizeof(float) * n * n);
// transfer data CPU -> GPU
hipMemcpy(device_a, &host_a[0], sizeof(float) * n * n, hipMemcpyHostToDevice);
hipMemcpy(device_b, &host_b[0], sizeof(float) * n * n, hipMemcpyHostToDevice);
hipMemcpy(device_c, &host_c[0], sizeof(float) * n * n, hipMemcpyHostToDevice);
// CUDA events, used to measure the execution time
hipEvent_t launch_begin, launch_end;
// create the events
hipEventCreate(&launch_begin);
hipEventCreate(&launch_end);
// record the start event
hipEventRecord(launch_begin);
// launch the kernel
hipLaunchKernelGGL(( sum_matrices), dim3(num_blocks), dim3(block_size), 0, 0, device_a, device_b, device_c, n, n);
// record the stop event
hipEventRecord(launch_end);
// instead of using hipDeviceSynchronize, we use hipEventSynchronize,
// which waits for the threads to finish
hipEventSynchronize(launch_end);
float time = 0;
// compute the elapsed execution time
hipEventElapsedTime(&time, launch_begin, launch_end);
std::cout << "Time = " << time << std::endl;
hipMemcpy(host_c, &device_c[0], sizeof(float) * n * n, hipMemcpyDeviceToHost);
for (int i = 0; i < 20; i++) {
std::cout << host_c[i] << " ";
}
std::cout << std::endl;
// destroy the events
hipEventDestroy(launch_begin);
hipEventDestroy(launch_end);
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
free(host_a);
free(host_b);
free(host_c);
return 0;
}
| 4603a779df4db51d09185a2693dd3d1ef681139e.cu |
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
#define TILE_WIDTH 16
__global__ void sum_matrices(float *ma, float *mb, float *mc, int height, int width)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y; // row
	int col = blockIdx.x * blockDim.x + threadIdx.x; // column
	// the mapping can also be reversed, i.e. row = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < height && col < width) {
		// row-major indexing: the stride between consecutive rows is the row width
		mc[row * width + col] = ma[row * width + col] + mb[row * width + col];
	}
}
int main() {
// size
const size_t n = 1 << 6;
	// set the block size along rows and columns
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
	// compute the number of blocks along rows and columns
const dim3 num_blocks(n / block_size.x, n / block_size.y);
	// there is no dim2 type in CUDA
	// allocate host memory
float *host_a = 0, *host_b = 0, *host_c = 0;
host_a = (float *) malloc(n * n * sizeof(float));
host_b = (float *) malloc(n * n * sizeof(float));
host_c = (float *) malloc(n * n * sizeof(float));
for (int i = 0; i < n * n; i++) {
host_a[i] = 2;
host_b[i] = 4;
host_c[i] = 0;
}
	// allocate device memory
float *device_a = 0, *device_b = 0, *device_c = 0;
cudaMalloc((void**)&device_a, sizeof(float) * n * n);
cudaMalloc((void**)&device_b, sizeof(float) * n * n);
cudaMalloc((void**)&device_c, sizeof(float) * n * n);
	// transfer data from CPU to GPU
cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(device_c, &host_c[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
	// CUDA events, used to measure the execution time
cudaEvent_t launch_begin, launch_end;
	// create the events
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
	// record the start event
cudaEventRecord(launch_begin);
	// launch the kernel
sum_matrices<<<num_blocks, block_size>>>(device_a, device_b, device_c, n, n);
	// record the stop event
cudaEventRecord(launch_end);
	// instead of cudaDeviceSynchronize, we use cudaEventSynchronize,
	// which waits for the threads to finish
cudaEventSynchronize(launch_end);
float time = 0;
	// compute the execution time
cudaEventElapsedTime(&time, launch_begin, launch_end);
std::cout << "Time = " << time << std::endl;
cudaMemcpy(host_c, &device_c[0], sizeof(float) * n * n, cudaMemcpyDeviceToHost);
for (int i = 0; i < 20; i++) {
std::cout << host_c[i] << " ";
}
std::cout << std::endl;
	// destroy the events
cudaEventDestroy(launch_begin);
cudaEventDestroy(launch_end);
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
free(host_a);
free(host_b);
free(host_c);
return 0;
}
|
4c6efc4d4b49f911cc29c7ae50e46e66328b4a47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include "corr2Mex.h"
#include "normXcorr_GPUKernel_V4.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *qualityH);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
/*
int imageWidth = 31;
int imageHeight = 31;
int SEARCH_X = 5;
int SEARCH_Y = 5;
int KERNEL_X = 11;
int KERNEL_Y = 11;
int numX = 1;
int numY = 1;
*/
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
float *CorrH;
//CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
hipHostMalloc((void**)&CorrH, Corr_size*DisplacementSize*sizeof(float));
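	// hipHostMalloc returns page-locked (pinned) host memory, which speeds up
	// host<->device transfers and lets hipMemcpyAsync overlap copies with computation.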
float *qualityH;
qualityH = (float*) malloc(sizeof(float)*parameters.numX*parameters.numY);
float elapsedTime_inc;
hipEvent_t startEvent_inc, stopEvent_inc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
hipEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters, qualityH); // Execution Model for GPU is set up in this function
hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
//for(int h=0;h<DisplacementSize;h++){
/*int h =DisplacementSize - 1;
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%0.4f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
//}*/
for(int h=0;h<DisplacementSize;h++){
for(int g=0;g<SEARCH_Y;g++){
for(int z=0;z<SEARCH_X;z++){
printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
// Printing for Quality Verification
printf("%f\n",qualityH[0]);
printf("\n");
	printf("Elapsed Time = %f\n",gpuTime);
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
	hipHostFree(CorrH);  // allocated with hipHostMalloc
	free(qualityH);      // allocated with malloc
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters,float *qualityH)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
hipMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
hipMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Temporary host corr to find max
float *tempCorrHost;
int modx = parameters.searchX%2;
int mody = parameters.searchY%2;
hipMalloc((void **)&tempCorrHost,sizeof(float)*(parameters.searchX+modx)*(parameters.searchY+mody)*parameters.numX*parameters.numY);
// CC Value Matrix
float *qualityD;
hipMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//hipMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
dim3 dimBlock(parameters.searchX, parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
// Launch the device computation threads!
hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0, Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar,tempCorrHost,qualityD);
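	// A minimal post-launch error check (sketch, not part of the original code):
	//   hipError_t err = hipGetLastError();
	//   if (err != hipSuccess) printf("normXcorr_GPU launch failed: %s\n", hipGetErrorString(err));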
hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost);
hipMemcpy(qualityH,qualityD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
	// Free device matrices and intermediate device buffers
	FreeDeviceMatrix(&Pred);
	FreeDeviceMatrix(&Postd);
	hipFree(preMean);
	hipFree(preVar);
	hipFree(postMean);
	hipFree(postVar);
	hipFree(tempCorrHost);
	hipFree(qualityD);
	hipFree(CorrD);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
//fp = fopen("trialNumbers.inp","r");
fp = fopen("Real_Data_US.inp","r");
// don't allocate memory on option 2
hipHostMalloc((void**)&M.elements, size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
//hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
hipMemcpyAsync(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice,0);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
//hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
hipMemcpyAsync(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost,0);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
hipHostFree(M->elements);
M->elements = NULL;
}
| 4c6efc4d4b49f911cc29c7ae50e46e66328b4a47.cu | // Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include "corr2Mex.h"
#include "normXcorr_GPUKernel_V4.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *qualityH);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
/*
int imageWidth = 31;
int imageHeight = 31;
int SEARCH_X = 5;
int SEARCH_Y = 5;
int KERNEL_X = 11;
int KERNEL_Y = 11;
int numX = 1;
int numY = 1;
*/
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
float *CorrH;
//CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
cudaMallocHost((void**)&CorrH, Corr_size*DisplacementSize*sizeof(float));
float *qualityH;
qualityH = (float*) malloc(sizeof(float)*parameters.numX*parameters.numY);
float elapsedTime_inc;
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters, qualityH); // Execution Model for GPU is set up in this function
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
//for(int h=0;h<DisplacementSize;h++){
/*int h =DisplacementSize - 1;
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%0.4f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
//}*/
for(int h=0;h<DisplacementSize;h++){
for(int g=0;g<SEARCH_Y;g++){
for(int z=0;z<SEARCH_X;z++){
printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
// Printing for Quality Verification
printf("%f\n",qualityH[0]);
printf("\n");
	printf("Elapsed Time = %f\n",gpuTime);
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
	cudaFreeHost(CorrH);  // allocated with cudaMallocHost
	free(qualityH);       // allocated with malloc
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters,float *qualityH)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
cudaMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
cudaMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Temporary host corr to find max
float *tempCorrHost;
int modx = parameters.searchX%2;
int mody = parameters.searchY%2;
cudaMalloc((void **)&tempCorrHost,sizeof(float)*(parameters.searchX+modx)*(parameters.searchY+mody)*parameters.numX*parameters.numY);
// CC Value Matrix
float *qualityD;
cudaMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//cudaMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
dim3 dimBlock(parameters.searchX, parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
// Launch the device computation threads!
normXcorr_GPU<<<dimGrid, dimBlock>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar,tempCorrHost,qualityD);
cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost);
cudaMemcpy(qualityH,qualityD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
	// Free device matrices and intermediate device buffers
	FreeDeviceMatrix(&Pred);
	FreeDeviceMatrix(&Postd);
	cudaFree(preMean);
	cudaFree(preVar);
	cudaFree(postMean);
	cudaFree(postVar);
	cudaFree(tempCorrHost);
	cudaFree(qualityD);
	cudaFree(CorrD);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
//fp = fopen("trialNumbers.inp","r");
fp = fopen("Real_Data_US.inp","r");
// don't allocate memory on option 2
cudaMallocHost((void**)&M.elements, size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
//cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
cudaMemcpyAsync(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice,0);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
//cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
cudaMemcpyAsync(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost,0);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
cudaFreeHost(M->elements);
M->elements = NULL;
}
|
1ad61de5f9f8798f0b1b9c0b06273a6f4623a0fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello() {
printf("Hello world! I\'m a thread in block %d\n", blockIdx.x);
}
int main(int argc, char** argv) {
hipLaunchKernelGGL(( hello), dim3(16), dim3(1), 0, 0, );
	// this statement makes the printf() output flush to stdout
hipDeviceSynchronize();
return 0;
}
| 1ad61de5f9f8798f0b1b9c0b06273a6f4623a0fe.cu | #include <stdio.h>
__global__ void hello() {
printf("Hello world! I\'m a thread in block %d\n", blockIdx.x);
}
int main(int argc, char** argv) {
hello<<<16, 1>>>();
	// this statement makes the printf() output flush to stdout
cudaDeviceSynchronize();
return 0;
}
|
4e889f52ccd4b7e708048dc5efd436862edb8eca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include<cuda.h>
#include <stdio.h>
#include "Matrix.h"
#include <vector>
#include<random>
#include<iostream>
#include <hip/device_functions.h>
#include<time.h>
using namespace std;
void load_matrix_to_device(Matrix* pA, Matrix* pdA) {
int ht = pA->height;
int wt = pA->width;
pdA->width = wt;
pdA->height = ht;
size_t size = ht*wt * sizeof(double);
hipMalloc(&(pdA->array_), size);
hipMemcpy(pdA->array_, pA->array_, size, hipMemcpyHostToDevice);
}
void load_matrix_to_host(Matrix* pA, Matrix*pdA) {
int ht = pdA->height;
int wt = pdA->width;
size_t size = ht*wt * sizeof(double);
hipMemcpy(pA->array_, pdA->array_, size, hipMemcpyDeviceToHost);
}
__global__ void MatMulKernel(Matrix d_W, Matrix d_H, Matrix d_AR) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x;
double res = 0;
for (int e = 0; e < d_W.width; ++e) {
res += d_W.array_[row * d_W.width + e] * d_H.array_[col * d_H.width + e];
}
d_AR.array_[row * d_AR.width + col] = res;
}
__global__ void extractKernel(Matrix d_W, Matrix d_W_col, int t) {
int index = threadIdx.y + blockIdx.y*blockDim.y;
d_W_col.array_[index] = d_W.array_[t + index*d_W.width];
}
__global__ void ConstructR_hatKernel (Matrix d_R_hat,Matrix d_W_col, Matrix d_H_col) {
int col = threadIdx.y + blockIdx.y*blockDim.y;
int row = threadIdx.x + blockIdx.x*blockDim.x;
if (d_R_hat.array_[col + row*d_R_hat.width] != 0.0) {
d_R_hat.array_[col + row*d_R_hat.width] = d_R_hat.array_[col + row*d_R_hat.width] +d_W_col.array_[row]*d_H_col.array_[col] ;
}
}
__global__ void updateR(Matrix W_col, Matrix H_col, Matrix R_hat, Matrix R) {
int col = threadIdx.y + blockIdx.y*blockDim.y;
int row = threadIdx.x + blockIdx.x*blockDim.x;
if (R.array_[col + row*R.width] != 0.0) {
R.array_[col + row*R.width] = R_hat.array_[col + row*R.width] - W_col.array_[row] * H_col.array_[col];
}
}
__global__ void CCDPPKernel(Matrix W_col, Matrix H_col, Matrix R_hat, double lambda) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int m = R_hat.height;
int n = R_hat.width;
double z_star = 0;
double num_z_star = 0;
double denum_z_star = lambda;
double s_star = 0;
double num_s_star = 0;
double denum_s_star = lambda;
// this array will enable us to update the R array
if (row < m) {
// we're still updating t-th column of W
for (int j = 0; j < n; ++j) {
if (R_hat.array_[row*n + j] != 0.0) {
num_z_star += (R_hat.array_[row*n + j])*H_col.array_[j];
denum_z_star += H_col.array_[j] *H_col.array_[j];
}
}
denum_z_star += lambda;
z_star = num_z_star / denum_z_star;
W_col.array_[row] = z_star;
}
	// we must synchronize the threads of the block before updating H
	__syncthreads();
if (row >= m) {
// we're now updating H_col
for (int i = 0; i < m; ++i) {
if (R_hat.array_[i*n + row - m] != 0.0) {
num_s_star += R_hat.array_[i*n + row - m] * W_col.array_[i];
denum_s_star += W_col.array_[i]*W_col.array_[i];
}
}
denum_s_star += lambda;
s_star = num_s_star / denum_s_star;
H_col.array_[row - m] = s_star;
}
}
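// Note on the launch configuration: CCDPPKernel is launched with a single block of
// dimBlock(1, 192) threads (W.height + H.height = 96 + 96), so the __syncthreads()
// above is a barrier across all threads that update W and H for column t.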
int main() {
for (int iter = 1; iter < 50; iter++) {
clock_t tStart = clock();
//int iter = 3;
double lambda = 500;
// height <-> rows , width <-> column
// matrix A is a squared matrix with missing values
// we first generate A randomly
double* ele = new double[9216];
double* el = new double[9216];
double* elem = new double[9216];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(0, 5);
for (int i = 0; i < 9216; i++) {
double num_gene = dis(gen);
if (num_gene <= 1) {
ele[i] = 0.0;
}
else {
ele[i] = num_gene;
}
elem[i] = 0.0;
el[i] = num_gene;
}
Matrix A = Matrix(96, 96, ele);
Matrix W = Matrix(96, 96, elem); // zeros
		Matrix H = Matrix(96, 96, el); // random initial values
Matrix R = A;
Matrix R_hat;
// load A,W,H,R to the device memory
Matrix d_A;
Matrix d_W;
Matrix d_H;
Matrix d_R;
Matrix d_R_hat;
// Invoke kernel
//vector<double> error;
dim3 dimBlock(1, 192);
dim3 dimBlockR(16, 16);
dim3 dimBlockcol(1, 96);
int gri_wd = W.height + H.height;
// verify division
dim3 dimGrid(1, 1);
dim3 dimGridR(6, 6);
dim3 dimGridcol(1, 1);
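		// dimBlockR(16, 16) with dimGridR(6, 6) covers the full 96 x 96 matrix
		// (6 * 16 = 96 in each dimension), one thread per element of R_hat / R.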
// prepare column for w and h to be used
double* a = new double[W.height];
double* b = new double[H.height];
Matrix W_col = Matrix(W.height,1,a );
Matrix H_col = Matrix(H.height, 1, b);
Matrix d_W_col;
Matrix d_H_col;
for (int t = 0; t < W.width ; ++t) {
			// construct R_hat
R_hat=R;
// get the t-th column of W
load_matrix_to_device(&W_col, &d_W_col);
load_matrix_to_device(&W, &d_W);
extractKernel << <dimGridcol,dimBlockcol >> > (d_W, d_W_col, t);
load_matrix_to_host(&W, &d_W);
load_matrix_to_host(&W_col, &d_W_col);
//////////////////////////////////
// get the t-th column of H
load_matrix_to_device(&H_col, &d_H_col);
load_matrix_to_device(&H, &d_H);
extractKernel << <dimGridcol, dimBlockcol >> > (d_H, d_H_col, t);
load_matrix_to_host(&H, &d_H);
load_matrix_to_host(&H_col, &d_H_col);
////////////////////////////////////
//W
load_matrix_to_device(&W_col, &d_W_col);
//H
load_matrix_to_device(&H_col, &d_H_col);
// R_hat
load_matrix_to_device(&R_hat, &d_R_hat);
ConstructR_hatKernel <<<dimGridR, dimBlockR >> > (d_R_hat,d_W_col, d_H_col);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&R_hat, &d_R_hat);
load_matrix_to_device(&W_col, &d_W_col);
//H
load_matrix_to_device(&H_col, &d_H_col);
// R_hat
load_matrix_to_device(&R_hat, &d_R_hat);
// ccd++ algorithm
CCDPPKernel << <dimGrid, dimBlock >> > (d_W_col, d_H_col, d_R_hat, lambda);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&R_hat, &d_R_hat);
// update W and H
for (int i = 0; i < W.height; i++) {
W.array_[t + i*W.width] = W_col.array_[i];
}
for (int i = 0; i < H.height; i++) {
H.array_[t + i*H.width] = H_col.array_[i];
}
load_matrix_to_device(&H_col, &d_H_col);
load_matrix_to_device(&W_col, &d_W_col);
load_matrix_to_device(&R_hat, &d_R_hat);
load_matrix_to_device(&R, &d_R);
updateR << <dimGridR, dimBlockR >> > (d_W_col, d_H_col, d_R_hat, d_R);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&R_hat, &d_R_hat);
load_matrix_to_host(&R, &d_R);
}
dim3 dimBlock1(16, 16);
dim3 dimGrid1(6, 6);
Matrix AR = Matrix(96, 96, elem);
Matrix d_AR;
load_matrix_to_device(&AR, &d_AR);
MatMulKernel << <dimGrid1, dimBlock1 >> > (d_W, d_H, d_AR);
load_matrix_to_host(&AR, &d_AR);
// Read W,H,R from device memory
double erro = 0;
for (int i = 0; i < 9216; i++) {
if (A.array_[i] != 0.0) {
erro += (AR.array_[i] - A.array_[i])*(AR.array_[i] - A.array_[i]);
}
}
erro = erro / 9216;
cout << sqrt(erro) << " iter : " << iter ;
printf("Time taken: %.2fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
// Free device memory
hipFree(d_W.array_); hipFree(d_H.array_); hipFree(d_R.array_); hipFree(d_A.array_);
}
system("pause");
} | 4e889f52ccd4b7e708048dc5efd436862edb8eca.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include<cuda.h>
#include <stdio.h>
#include "Matrix.h"
#include <vector>
#include<random>
#include<iostream>
#include <device_functions.h>
#include<time.h>
using namespace std;
void load_matrix_to_device(Matrix* pA, Matrix* pdA) {
int ht = pA->height;
int wt = pA->width;
pdA->width = wt;
pdA->height = ht;
size_t size = ht*wt * sizeof(double);
cudaMalloc(&(pdA->array_), size);
cudaMemcpy(pdA->array_, pA->array_, size, cudaMemcpyHostToDevice);
}
void load_matrix_to_host(Matrix* pA, Matrix*pdA) {
int ht = pdA->height;
int wt = pdA->width;
size_t size = ht*wt * sizeof(double);
cudaMemcpy(pA->array_, pdA->array_, size, cudaMemcpyDeviceToHost);
}
__global__ void MatMulKernel(Matrix d_W, Matrix d_H, Matrix d_AR) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x;
double res = 0;
for (int e = 0; e < d_W.width; ++e) {
res += d_W.array_[row * d_W.width + e] * d_H.array_[col * d_H.width + e];
}
d_AR.array_[row * d_AR.width + col] = res;
}
__global__ void extractKernel(Matrix d_W, Matrix d_W_col, int t) {
int index = threadIdx.y + blockIdx.y*blockDim.y;
d_W_col.array_[index] = d_W.array_[t + index*d_W.width];
}
__global__ void ConstructR_hatKernel (Matrix d_R_hat,Matrix d_W_col, Matrix d_H_col) {
int col = threadIdx.y + blockIdx.y*blockDim.y;
int row = threadIdx.x + blockIdx.x*blockDim.x;
if (d_R_hat.array_[col + row*d_R_hat.width] != 0.0) {
d_R_hat.array_[col + row*d_R_hat.width] = d_R_hat.array_[col + row*d_R_hat.width] +d_W_col.array_[row]*d_H_col.array_[col] ;
}
}
__global__ void updateR(Matrix W_col, Matrix H_col, Matrix R_hat, Matrix R) {
int col = threadIdx.y + blockIdx.y*blockDim.y;
int row = threadIdx.x + blockIdx.x*blockDim.x;
if (R.array_[col + row*R.width] != 0.0) {
R.array_[col + row*R.width] = R_hat.array_[col + row*R.width] - W_col.array_[row] * H_col.array_[col];
}
}
__global__ void CCDPPKernel(Matrix W_col, Matrix H_col, Matrix R_hat, double lambda) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int m = R_hat.height;
int n = R_hat.width;
double z_star = 0;
double num_z_star = 0;
double denum_z_star = lambda;
double s_star = 0;
double num_s_star = 0;
double denum_s_star = lambda;
// this array will enable us to update the R array
if (row < m) {
// we're still updating t-th column of W
for (int j = 0; j < n; ++j) {
if (R_hat.array_[row*n + j] != 0.0) {
num_z_star += (R_hat.array_[row*n + j])*H_col.array_[j];
denum_z_star += H_col.array_[j] *H_col.array_[j];
}
}
denum_z_star += lambda;
z_star = num_z_star / denum_z_star;
W_col.array_[row] = z_star;
}
	// we must synchronize the threads of the block before updating H
	__syncthreads();
if (row >= m) {
// we're now updating H_col
for (int i = 0; i < m; ++i) {
if (R_hat.array_[i*n + row - m] != 0.0) {
num_s_star += R_hat.array_[i*n + row - m] * W_col.array_[i];
denum_s_star += W_col.array_[i]*W_col.array_[i];
}
}
denum_s_star += lambda;
s_star = num_s_star / denum_s_star;
H_col.array_[row - m] = s_star;
}
}
int main() {
for (int iter = 1; iter < 50; iter++) {
clock_t tStart = clock();
//int iter = 3;
double lambda = 500;
// height <-> rows , width <-> column
// matrix A is a squared matrix with missing values
// we first generate A randomly
double* ele = new double[9216];
double* el = new double[9216];
double* elem = new double[9216];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(0, 5);
for (int i = 0; i < 9216; i++) {
double num_gene = dis(gen);
if (num_gene <= 1) {
ele[i] = 0.0;
}
else {
ele[i] = num_gene;
}
elem[i] = 0.0;
el[i] = num_gene;
}
Matrix A = Matrix(96, 96, ele);
Matrix W = Matrix(96, 96, elem); // zeros
		Matrix H = Matrix(96, 96, el); // random initial values
Matrix R = A;
Matrix R_hat;
// load A,W,H,R to the device memory
Matrix d_A;
Matrix d_W;
Matrix d_H;
Matrix d_R;
Matrix d_R_hat;
// Invoke kernel
//vector<double> error;
dim3 dimBlock(1, 192);
dim3 dimBlockR(16, 16);
dim3 dimBlockcol(1, 96);
int gri_wd = W.height + H.height;
// verify division
dim3 dimGrid(1, 1);
dim3 dimGridR(6, 6);
dim3 dimGridcol(1, 1);
// prepare column for w and h to be used
double* a = new double[W.height];
double* b = new double[H.height];
Matrix W_col = Matrix(W.height,1,a );
Matrix H_col = Matrix(H.height, 1, b);
Matrix d_W_col;
Matrix d_H_col;
for (int t = 0; t < W.width ; ++t) {
			// construct R_hat
R_hat=R;
// get the t-th column of W
load_matrix_to_device(&W_col, &d_W_col);
load_matrix_to_device(&W, &d_W);
extractKernel << <dimGridcol,dimBlockcol >> > (d_W, d_W_col, t);
load_matrix_to_host(&W, &d_W);
load_matrix_to_host(&W_col, &d_W_col);
//////////////////////////////////
// get the t-th column of H
load_matrix_to_device(&H_col, &d_H_col);
load_matrix_to_device(&H, &d_H);
extractKernel << <dimGridcol, dimBlockcol >> > (d_H, d_H_col, t);
load_matrix_to_host(&H, &d_H);
load_matrix_to_host(&H_col, &d_H_col);
////////////////////////////////////
//W
load_matrix_to_device(&W_col, &d_W_col);
//H
load_matrix_to_device(&H_col, &d_H_col);
// R_hat
load_matrix_to_device(&R_hat, &d_R_hat);
ConstructR_hatKernel <<<dimGridR, dimBlockR >> > (d_R_hat,d_W_col, d_H_col);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&R_hat, &d_R_hat);
load_matrix_to_device(&W_col, &d_W_col);
//H
load_matrix_to_device(&H_col, &d_H_col);
// R_hat
load_matrix_to_device(&R_hat, &d_R_hat);
// ccd++ algorithm
CCDPPKernel << <dimGrid, dimBlock >> > (d_W_col, d_H_col, d_R_hat, lambda);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&R_hat, &d_R_hat);
// update W and H
for (int i = 0; i < W.height; i++) {
W.array_[t + i*W.width] = W_col.array_[i];
}
for (int i = 0; i < H.height; i++) {
H.array_[t + i*H.width] = H_col.array_[i];
}
load_matrix_to_device(&H_col, &d_H_col);
load_matrix_to_device(&W_col, &d_W_col);
load_matrix_to_device(&R_hat, &d_R_hat);
load_matrix_to_device(&R, &d_R);
updateR << <dimGridR, dimBlockR >> > (d_W_col, d_H_col, d_R_hat, d_R);
load_matrix_to_host(&H_col, &d_H_col);
load_matrix_to_host(&W_col, &d_W_col);
load_matrix_to_host(&R_hat, &d_R_hat);
load_matrix_to_host(&R, &d_R);
}
dim3 dimBlock1(16, 16);
dim3 dimGrid1(6, 6);
Matrix AR = Matrix(96, 96, elem);
Matrix d_AR;
load_matrix_to_device(&AR, &d_AR);
MatMulKernel << <dimGrid1, dimBlock1 >> > (d_W, d_H, d_AR);
load_matrix_to_host(&AR, &d_AR);
// Read W,H,R from device memory
double erro = 0;
for (int i = 0; i < 9216; i++) {
if (A.array_[i] != 0.0) {
erro += (AR.array_[i] - A.array_[i])*(AR.array_[i] - A.array_[i]);
}
}
erro = erro / 9216;
cout << sqrt(erro) << " iter : " << iter ;
printf("Time taken: %.2fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
// Free device memory
cudaFree(d_W.array_); cudaFree(d_H.array_); cudaFree(d_R.array_); cudaFree(d_A.array_);
}
system("pause");
} |
7b39634f961b14244cf34b73a122f0dae08e733c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvCSRKernel(float *out, int *matCols, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for csr format
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dim) return;
float dot = 0;
for (int i = matRows[row]; i < matRows[row+1]; i++) {
dot += matData[i] * vec[matCols[i]];
}
out[row] = dot;
}
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for jds format
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dim) return;
float dot = 0;
for (int i = 0; i < matRows[row]; i++) {
int index = row + matColStart[i];
dot += matData[index] * vec[matCols[index]];
}
out[matRowPerm[row]] = dot;
}
static void spmvCSR(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
int block = 1024;
int grid = (dim%block) == 0 ? (dim/block) : 1 + (dim/block);
hipLaunchKernelGGL(( spmvCSRKernel), dim3(grid), dim3(block), 0, 0, out, matCols, matRows, matData, vec, dim);
}
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for jds format
int block = 1024;
int grid = (dim%block) == 0 ? (dim/block) : 1 + (dim/block);
hipLaunchKernelGGL(( spmvJDSKernel), dim3(grid), dim3(block), 0, 0, out, matColStart, matCols,
matRowPerm, matRows, matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
bool usingJDSQ;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceCSRCols;
int *deviceCSRRows;
float *deviceCSRData;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
usingJDSQ = wbImport_flag(wbArg_getInputFile(args, 0)) == 1;
hostCSRCols =
(int *)wbImport(wbArg_getInputFile(args, 1), &ncols, "Integer");
hostCSRRows =
(int *)wbImport(wbArg_getInputFile(args, 2), &nrows, "Integer");
hostCSRData =
(float *)wbImport(wbArg_getInputFile(args, 3), &ndata, "Real");
hostVector =
(float *)wbImport(wbArg_getInputFile(args, 4), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
if (usingJDSQ) {
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm,
&hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
}
wbTime_start(GPU, "Allocating GPU memory.");
if (usingJDSQ) {
hipMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
hipMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
hipMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
} else {
hipMalloc((void **)&deviceCSRCols, sizeof(int) * ncols);
hipMalloc((void **)&deviceCSRRows, sizeof(int) * nrows);
hipMalloc((void **)&deviceCSRData, sizeof(float) * ndata);
}
hipMalloc((void **)&deviceVector, sizeof(float) * dim);
hipMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
if (usingJDSQ) {
hipMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata,
hipMemcpyHostToDevice);
} else {
hipMemcpy(deviceCSRCols, hostCSRCols, sizeof(int) * ncols,
hipMemcpyHostToDevice);
hipMemcpy(deviceCSRRows, hostCSRRows, sizeof(int) * nrows,
hipMemcpyHostToDevice);
hipMemcpy(deviceCSRData, hostCSRData, sizeof(float) * ndata,
hipMemcpyHostToDevice);
}
hipMemcpy(deviceVector, hostVector, sizeof(float) * dim,
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
if (usingJDSQ) {
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols,
deviceJDSRowPerm, deviceJDSRows, deviceJDSData, deviceVector,
dim);
} else {
spmvCSR(deviceOutput, deviceCSRCols, deviceCSRRows, deviceCSRData,
deviceVector, dim);
}
hipDeviceSynchronize();
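  // The wbCheck macro defined at the top of this file could wrap these runtime
  // calls, e.g. (sketch): wbCheck(hipGetLastError()); wbCheck(hipDeviceSynchronize());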
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * dim,
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceCSRCols);
hipFree(deviceCSRRows);
hipFree(deviceCSRData);
hipFree(deviceVector);
hipFree(deviceOutput);
if (usingJDSQ) {
hipFree(deviceJDSColStart);
hipFree(deviceJDSCols);
hipFree(deviceJDSRowPerm);
hipFree(deviceJDSRows);
hipFree(deviceJDSData);
}
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
if (usingJDSQ) {
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
}
return 0;
} | 7b39634f961b14244cf34b73a122f0dae08e733c.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvCSRKernel(float *out, int *matCols, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for csr format
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dim) return;
float dot = 0;
for (int i = matRows[row]; i < matRows[row+1]; i++) {
dot += matData[i] * vec[matCols[i]];
}
out[row] = dot;
}
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for jds format
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dim) return;
float dot = 0;
for (int i = 0; i < matRows[row]; i++) {
int index = row + matColStart[i];
dot += matData[index] * vec[matCols[index]];
}
out[matRowPerm[row]] = dot;
}
static void spmvCSR(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
int block = 1024;
int grid = (dim%block) == 0 ? (dim/block) : 1 + (dim/block);
spmvCSRKernel<<<grid, block>>>(out, matCols, matRows, matData, vec, dim);
}
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for jds format
int block = 1024;
int grid = (dim%block) == 0 ? (dim/block) : 1 + (dim/block);
spmvJDSKernel<<<grid, block>>> (out, matColStart, matCols,
matRowPerm, matRows, matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
bool usingJDSQ;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceCSRCols;
int *deviceCSRRows;
float *deviceCSRData;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
usingJDSQ = wbImport_flag(wbArg_getInputFile(args, 0)) == 1;
hostCSRCols =
(int *)wbImport(wbArg_getInputFile(args, 1), &ncols, "Integer");
hostCSRRows =
(int *)wbImport(wbArg_getInputFile(args, 2), &nrows, "Integer");
hostCSRData =
(float *)wbImport(wbArg_getInputFile(args, 3), &ndata, "Real");
hostVector =
(float *)wbImport(wbArg_getInputFile(args, 4), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
if (usingJDSQ) {
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm,
&hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
}
wbTime_start(GPU, "Allocating GPU memory.");
if (usingJDSQ) {
cudaMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
cudaMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
cudaMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
} else {
cudaMalloc((void **)&deviceCSRCols, sizeof(int) * ncols);
cudaMalloc((void **)&deviceCSRRows, sizeof(int) * nrows);
cudaMalloc((void **)&deviceCSRData, sizeof(float) * ndata);
}
cudaMalloc((void **)&deviceVector, sizeof(float) * dim);
cudaMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
if (usingJDSQ) {
cudaMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata,
cudaMemcpyHostToDevice);
} else {
cudaMemcpy(deviceCSRCols, hostCSRCols, sizeof(int) * ncols,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceCSRRows, hostCSRRows, sizeof(int) * nrows,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceCSRData, hostCSRData, sizeof(float) * ndata,
cudaMemcpyHostToDevice);
}
cudaMemcpy(deviceVector, hostVector, sizeof(float) * dim,
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
if (usingJDSQ) {
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols,
deviceJDSRowPerm, deviceJDSRows, deviceJDSData, deviceVector,
dim);
} else {
spmvCSR(deviceOutput, deviceCSRCols, deviceCSRRows, deviceCSRData,
deviceVector, dim);
}
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * dim,
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceCSRCols);
cudaFree(deviceCSRRows);
cudaFree(deviceCSRData);
cudaFree(deviceVector);
cudaFree(deviceOutput);
if (usingJDSQ) {
cudaFree(deviceJDSColStart);
cudaFree(deviceJDSCols);
cudaFree(deviceJDSRowPerm);
cudaFree(deviceJDSRows);
cudaFree(deviceJDSData);
}
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
if (usingJDSQ) {
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
}
return 0;
} |
c333a959b4fd2ba0143bc8e9eae7fee4ec8656e8.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X");
auto *rank_offset = ctx.Input<Tensor>("RankOffset");
auto *param = ctx.Input<Tensor>("RankParam");
auto *input_help = ctx.Output<Tensor>("InputHelp");
auto *ins_rank = ctx.Output<Tensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<Tensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0], ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2, max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim, para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    int max_ins = std::max(ins_num, max_size);
Tensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(
ctx.cuda_device_context().stream(), X->data<T>(), ins_num, x_fea_dim,
input_help_data, ins_num, block_matrix_row, rank_offset->data<int>(),
rank_offset_dims[0], rank_offset_dims[1], ins_rank_data, max_rank);
expand_rank_attention_param(
ctx.cuda_device_context().stream(), X->data<T>(), ins_num, x_fea_dim,
rank_offset->data<int>(), rank_offset_dims[0], rank_offset_dims[1],
param->data<T>(), para_row, para_col, param_help_data,
ins_num * block_matrix_row, para_col, max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
blas.BatchedGEMM(transA, transB, 1, para_col, block_matrix_row, alpha,
input_help_data, param_help_data, beta, out_data, ins_num,
strideA, strideB);
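    // Batched GEMM: ins_num independent products of shape
    // (1 x block_matrix_row) * (block_matrix_row x para_col) -> (1 x para_col),
    // i.e. each instance's expanded input row times its per-rank parameter block.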
}
};
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X"); // not use data
auto *rank_offset = ctx.Input<Tensor>("RankOffset"); // not use data
auto *param = ctx.Input<Tensor>("RankParam"); // not use data
auto *input_help = ctx.Input<Tensor>("InputHelp");
auto *ins_rank = ctx.Input<Tensor>("InsRank");
auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para = ctx.Output<Tensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto &place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
    int max_ins = std::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
Tensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
blas.BatchedGEMM(transA, transB, block_matrix_row, para_col, 1, alpha,
input_help_data, dout->data<T>(), beta, param_grad_data,
ins_num, strideA, strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(
ctx.cuda_device_context().stream(), param_grad_data,
ins_num * block_matrix_row, para_col, drank_para->data<T>(), para_row,
para_col, ins_rank_data, ins_num, max_rank, x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
| c333a959b4fd2ba0143bc8e9eae7fee4ec8656e8.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X");
auto *rank_offset = ctx.Input<Tensor>("RankOffset");
auto *param = ctx.Input<Tensor>("RankParam");
auto *input_help = ctx.Output<Tensor>("InputHelp");
auto *ins_rank = ctx.Output<Tensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<Tensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0], ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2, max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim, para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
int max_ins = std::max(ins_num, max_size);
Tensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(
ctx.cuda_device_context().stream(), X->data<T>(), ins_num, x_fea_dim,
input_help_data, ins_num, block_matrix_row, rank_offset->data<int>(),
rank_offset_dims[0], rank_offset_dims[1], ins_rank_data, max_rank);
expand_rank_attention_param(
ctx.cuda_device_context().stream(), X->data<T>(), ins_num, x_fea_dim,
rank_offset->data<int>(), rank_offset_dims[0], rank_offset_dims[1],
param->data<T>(), para_row, para_col, param_help_data,
ins_num * block_matrix_row, para_col, max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
blas.BatchedGEMM(transA, transB, 1, para_col, block_matrix_row, alpha,
input_help_data, param_help_data, beta, out_data, ins_num,
strideA, strideB);
}
};
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X"); // not use data
auto *rank_offset = ctx.Input<Tensor>("RankOffset"); // not use data
auto *param = ctx.Input<Tensor>("RankParam"); // not use data
auto *input_help = ctx.Input<Tensor>("InputHelp");
auto *ins_rank = ctx.Input<Tensor>("InsRank");
auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para = ctx.Output<Tensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto &place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
int max_ins = std::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
Tensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
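    // Shape note (inferred from the arguments below): with transA = CblasTrans and K = 1,
    // each of the ins_num batched GEMMs is an outer product of one expanded input row
    // (block_matrix_row x 1) with the matching dOut row (1 x para_col); the resulting
    // per-instance gradient blocks are then merged into drank_para by rank further down.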
blas.BatchedGEMM(transA, transB, block_matrix_row, para_col, 1, alpha,
input_help_data, dout->data<T>(), beta, param_grad_data,
ins_num, strideA, strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(
ctx.cuda_device_context().stream(), param_grad_data,
ins_num * block_matrix_row, para_col, drank_para->data<T>(), para_row,
para_col, ins_rank_data, ins_num, max_rank, x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
|
580b81cd8acaacac8d87a35f6907287c604a10d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <hip/hip_runtime.h>
#include "matrix_build.h"
#include <rocblas.h>
#include <magma_lapack.h>
#include <magma_v2.h>
using namespace std;
int main() {
magma_init();
magma_int_t dev_t = 0;
magma_queue_t queue_qr = NULL;
magma_queue_create(dev_t,&queue_qr);
//set up device and queue
int numFluid = 2268;
int numBoundary = 1903;
int numGhost = 0;
int numParticle = numFluid + numBoundary + numGhost;
int numNeighbourone = 177;
int numNeighbour = 177;
// read data from txt file
double* inPressure = new double[numParticle];
double* inVolume = new double[numParticle];
double* inSoundSpeed = new double[numParticle];
double* inVelocity = new double[numParticle];
int* neighbourlist0 = new int[numFluid*numNeighbourone];
int* neighbourlist1 = new int[numFluid*numNeighbourone];
int* neighboursize0 =new int[numFluid];
int* neighboursize1 = new int[numFluid];
int* LPFOrder0 = new int[numFluid];
int* LPFOrder1 = new int[numFluid];
double* xPosition = new double[numParticle];
double* yPosition = new double[numParticle];
//store data into array
ifstream myfile;
myfile.open("xPosition.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
xPosition[i] = tem;
}
myfile.close();
myfile.open("yPosition.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
yPosition[i] = tem;
}
myfile.close();
myfile.open("inPressure.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inPressure[i]=tem;
}
myfile.close();
myfile.open("inVelocity.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inVelocity[i]=tem;
}
myfile.close();
myfile.open("inSoundSpeed.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inSoundSpeed[i]=tem;
}
myfile.close();
myfile.open("inVolume.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inVolume[i]=tem;
}
myfile.close();
myfile.open("neighbourlist0.txt");
for(int i=0;i<numFluid*numNeighbourone;i++){
int tem;
myfile>>tem;
neighbourlist0[i]=tem;
}
myfile.close();
myfile.open("neighbourlist1.txt");
for(int i=0;i<numFluid*numNeighbourone;i++){
int tem;
myfile>>tem;
neighbourlist1[i]=tem;
}
myfile.close();
myfile.open("neighboursize0.txt");
for(int i=0;i<numFluid;i++){
double tem;
myfile>>tem;
neighboursize0[i]=tem;
}
myfile.close();
myfile.open("neighboursize1.txt");
for(int i=0;i<numFluid;i++){
double tem;
myfile>>tem;
neighboursize1[i]=tem;
}
myfile.close();
fill_n(LPFOrder0,numFluid,1);
fill_n(LPFOrder1,numFluid,1);
//device arrays which need copy
double* d_xPosition;
double* d_yPosition;
double* d_inPressure;
double* d_inVolume;
double* d_inSoundSpeed;
double* d_inVelocity;
int* d_neighbourlist0;
int* d_neighbourlist1;
int* d_neighboursize0;
int* d_neighboursize1;
int* d_LPFOrder0;
int* d_LPFOrder1;
// device arrays which dont need memcopy
int* d_numRow;
int* d_numCol;
hipMalloc((void**)&d_xPosition,sizeof(double)*numParticle);
hipMalloc((void**)&d_yPosition,sizeof(double)*numParticle);
hipMalloc((void**)&d_inPressure,sizeof(double)*numParticle);
hipMalloc((void**)&d_inVolume,sizeof(double)*numParticle );
hipMalloc((void**)&d_inVelocity, sizeof(double)*numParticle);
hipMalloc((void**)&d_inSoundSpeed,sizeof(double)*numParticle);
hipMalloc((void**)&d_neighbourlist0,sizeof(int)*numFluid*numNeighbourone);
hipMalloc((void**)&d_neighbourlist1,sizeof(int)*numFluid*numNeighbourone);
hipMalloc((void**)&d_neighboursize0,sizeof(int)*numFluid);
hipMalloc((void**)&d_neighboursize1,sizeof(int)*numFluid);
hipMalloc((void**)&d_LPFOrder0,sizeof(int)*numFluid);
hipMalloc((void**)&d_LPFOrder1,sizeof(int)*numFluid);
hipMalloc((void**)&d_numRow,sizeof(int)*numFluid);
hipMalloc((void**)&d_numCol,sizeof(int)*numFluid);
cout<<"-------------------------cuda allocate done----------------------------------"<<endl;
//memory copy
hipMemcpy(d_xPosition,xPosition,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_yPosition,yPosition,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_inPressure,inPressure,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_inVolume,inVolume,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_inVelocity,inVelocity,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_inSoundSpeed,inSoundSpeed,sizeof(double)*numParticle,hipMemcpyHostToDevice);
hipMemcpy(d_neighbourlist0,neighbourlist0,sizeof(int)*numFluid*numNeighbourone,hipMemcpyHostToDevice);
hipMemcpy(d_neighbourlist1,neighbourlist1,sizeof(int)*numFluid*numNeighbourone,hipMemcpyHostToDevice);
hipMemcpy(d_neighboursize0,neighboursize0,sizeof(int)*numFluid,hipMemcpyHostToDevice);
hipMemcpy(d_neighboursize1,neighboursize1,sizeof(int)*numFluid,hipMemcpyHostToDevice);
hipMemcpy(d_LPFOrder0,LPFOrder0,sizeof(int)*numFluid,hipMemcpyHostToDevice);
hipMemcpy(d_LPFOrder1,LPFOrder1,sizeof(int)*numFluid,hipMemcpyHostToDevice);
cout<<"----------------------------mem allocate and copy done--------------------------------"<<endl;
//----------------OUTPUT-------------------------
/*
double* outVelocity = new double[numParticle];
double* outPressure = new double[numParticle];
double* outSoundSpeed = new double[numParticle];
double* outVolume = new double[numParticle];
*/
cout<<"--------------------------------Testing---------------------------------------"<<endl;
dim3 blocks(128,1);
dim3 threads(128,1);
hipLaunchKernelGGL(( computeRowandCol), dim3(blocks),dim3(threads), 0, 0, d_neighboursize0,d_numRow,d_numCol,d_LPFOrder0,numFluid);
//build double device pointer A
double** A;
hipMalloc((void**)&A,sizeof(double*)*numFluid);
double** A_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
hipMalloc((void**)&A_temp[i],sizeof(double)*5*numNeighbourone);
}
hipMemcpy(A, A_temp,sizeof(double*)*numFluid,hipMemcpyHostToDevice);
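    // Note on this pattern (descriptive): batched MAGMA/BLAS routines expect an array of
    // device pointers, so each per-particle matrix gets its own device buffer (A_temp[i])
    // and the host array holding those device pointers is copied into the device-side
    // pointer array A with a single hipMemcpy.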
//build distance array
double* d_distance;
hipMalloc((void**)&d_distance,sizeof(double)*numFluid);
cout<<"------------------------------Testing2---------------------------"<<endl;
dim3 blocks1(128,1);
dim3 threads1(128,1);
hipLaunchKernelGGL(( computeA2D), dim3(blocks1),dim3(threads1), 0, 0, d_neighbourlist0,d_LPFOrder0,d_numRow,d_xPosition,d_yPosition, numFluid,numNeighbourone,A,d_distance);
cout<<"-----------------------Testing Done------------------------------"<<endl;
/*
for(int i=0;i<numFluid;i++){
cout<<"A of number: "<<i<<endl;
magma_dprint_gpu(6,1,A_temp[i],6,queue_qr);
}
*/
//Process QR batched mode
magma_int_t m = 3;
magma_int_t n = 2;
magma_int_t lda = 3;
magma_int_t min_mn = min(m,n);
double **Tau;
hipMalloc((void**)&Tau,numFluid*sizeof(double*));
double** Tau_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
hipMalloc((void**)&Tau_temp[i],sizeof(double)*min_mn);
}
hipMemcpy(Tau, Tau_temp, sizeof(double*)*numFluid, hipMemcpyHostToDevice);
magma_int_t* info;
hipMalloc((void**)&info,numFluid*sizeof(magma_int_t));
magma_int_t batchid = numFluid;
//Start QR
magma_dgeqrf_batched(m,n,A,lda,Tau,info,batchid,queue_qr);
cout<<"-------------------------QR DONE----------------------------------"<<endl;
//build right hand side B
double** B;
hipMalloc((void**)&B,sizeof(double*)*numFluid);
double** B_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
hipMalloc((void**)&B_temp[i],sizeof(double)*numNeighbourone);
}
hipMemcpy(B,B_temp,sizeof(double*)*numFluid,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computeB), dim3(blocks),dim3(threads), 0, 0, d_neighbourlist0, d_numRow, d_inPressure, numNeighbourone, numFluid, B);
/*
for(int i=0;i<numFluid;i++){
cout<<"number: "<<i<<endl;
magma_dprint_gpu(3,1,B_temp[i],3,queue_qr);
}
*/
//solver linear system
double **result;
hipMalloc((void**)&result,numFluid*sizeof(double*));
double** result_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
hipMalloc((void**)&result_temp[i],sizeof(double)*min_mn);
}
hipMemcpy(result, result_temp, sizeof(double*)*numFluid, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computeLS), dim3(blocks),dim3(threads), 0, 0, A,B,Tau,d_numRow,d_numCol,numFluid, result);
cout<<"BIG SUCCESS!!"<<endl;
for(int i=0;i<numFluid;i++){
cout<<"number: "<<i<<endl;
magma_dprint_gpu(2,1,result_temp[i],2,queue_qr);
}
magma_queue_destroy(queue_qr);
magma_finalize();
//release memory
delete[] inPressure;
delete[] inVolume;
delete[] inVelocity;
delete[] inSoundSpeed;
delete[] neighbourlist0;
delete[] neighbourlist1;
delete[] neighboursize0;
delete[] neighboursize1;
delete[] LPFOrder0;
delete[] LPFOrder1;
    delete[] xPosition;
    delete[] yPosition;
hipFree(d_neighboursize0);
hipFree(d_neighboursize1);
hipFree(d_neighbourlist0);
hipFree(d_neighbourlist1);
hipFree(d_LPFOrder0);
hipFree(d_LPFOrder1);
hipFree(d_inPressure);
hipFree(d_inVolume);
hipFree(d_inSoundSpeed);
hipFree(d_inVelocity);
hipFree(d_numRow);
hipFree(d_numCol);
hipFree(A);
hipFree(result);
    // only numFluid per-matrix buffers were allocated; free them before deleting the
    // host pointer arrays
    for(int i=0;i<numFluid;i++){
        hipFree(A_temp[i]);
        hipFree(B_temp[i]);
        hipFree(result_temp[i]);
    }
    delete[] A_temp;
    delete[] B_temp;
    delete[] result_temp;
hipFree(d_distance);
hipFree(B);
// QR
    for(int i=0;i<numFluid;i++){
        hipFree(Tau_temp[i]);
    }
    delete[] Tau_temp;
    hipFree(Tau);
hipFree(info);
}
| 580b81cd8acaacac8d87a35f6907287c604a10d8.cu | #include <iostream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <cuda_runtime.h>
#include "matrix_build.h"
#include <cublas_v2.h>
#include <magma_lapack.h>
#include <magma_v2.h>
using namespace std;
int main() {
magma_init();
magma_int_t dev_t = 0;
magma_queue_t queue_qr = NULL;
magma_queue_create(dev_t,&queue_qr);
//set up device and queue
int numFluid = 2268;
int numBoundary = 1903;
int numGhost = 0;
int numParticle = numFluid + numBoundary + numGhost;
int numNeighbourone = 177;
int numNeighbour = 177;
// read data from txt file
double* inPressure = new double[numParticle];
double* inVolume = new double[numParticle];
double* inSoundSpeed = new double[numParticle];
double* inVelocity = new double[numParticle];
int* neighbourlist0 = new int[numFluid*numNeighbourone];
int* neighbourlist1 = new int[numFluid*numNeighbourone];
int* neighboursize0 =new int[numFluid];
int* neighboursize1 = new int[numFluid];
int* LPFOrder0 = new int[numFluid];
int* LPFOrder1 = new int[numFluid];
double* xPosition = new double[numParticle];
double* yPosition = new double[numParticle];
//store data into array
ifstream myfile;
myfile.open("xPosition.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
xPosition[i] = tem;
}
myfile.close();
myfile.open("yPosition.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
yPosition[i] = tem;
}
myfile.close();
myfile.open("inPressure.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inPressure[i]=tem;
}
myfile.close();
myfile.open("inVelocity.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inVelocity[i]=tem;
}
myfile.close();
myfile.open("inSoundSpeed.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inSoundSpeed[i]=tem;
}
myfile.close();
myfile.open("inVolume.txt");
for(int i=0;i<numParticle;i++){
double tem;
myfile>>tem;
inVolume[i]=tem;
}
myfile.close();
myfile.open("neighbourlist0.txt");
for(int i=0;i<numFluid*numNeighbourone;i++){
int tem;
myfile>>tem;
neighbourlist0[i]=tem;
}
myfile.close();
myfile.open("neighbourlist1.txt");
for(int i=0;i<numFluid*numNeighbourone;i++){
int tem;
myfile>>tem;
neighbourlist1[i]=tem;
}
myfile.close();
myfile.open("neighboursize0.txt");
for(int i=0;i<numFluid;i++){
double tem;
myfile>>tem;
neighboursize0[i]=tem;
}
myfile.close();
myfile.open("neighboursize1.txt");
for(int i=0;i<numFluid;i++){
double tem;
myfile>>tem;
neighboursize1[i]=tem;
}
myfile.close();
fill_n(LPFOrder0,numFluid,1);
fill_n(LPFOrder1,numFluid,1);
//device arrays which need copy
double* d_xPosition;
double* d_yPosition;
double* d_inPressure;
double* d_inVolume;
double* d_inSoundSpeed;
double* d_inVelocity;
int* d_neighbourlist0;
int* d_neighbourlist1;
int* d_neighboursize0;
int* d_neighboursize1;
int* d_LPFOrder0;
int* d_LPFOrder1;
// device arrays which dont need memcopy
int* d_numRow;
int* d_numCol;
cudaMalloc((void**)&d_xPosition,sizeof(double)*numParticle);
cudaMalloc((void**)&d_yPosition,sizeof(double)*numParticle);
cudaMalloc((void**)&d_inPressure,sizeof(double)*numParticle);
cudaMalloc((void**)&d_inVolume,sizeof(double)*numParticle );
cudaMalloc((void**)&d_inVelocity, sizeof(double)*numParticle);
cudaMalloc((void**)&d_inSoundSpeed,sizeof(double)*numParticle);
cudaMalloc((void**)&d_neighbourlist0,sizeof(int)*numFluid*numNeighbourone);
cudaMalloc((void**)&d_neighbourlist1,sizeof(int)*numFluid*numNeighbourone);
cudaMalloc((void**)&d_neighboursize0,sizeof(int)*numFluid);
cudaMalloc((void**)&d_neighboursize1,sizeof(int)*numFluid);
cudaMalloc((void**)&d_LPFOrder0,sizeof(int)*numFluid);
cudaMalloc((void**)&d_LPFOrder1,sizeof(int)*numFluid);
cudaMalloc((void**)&d_numRow,sizeof(int)*numFluid);
cudaMalloc((void**)&d_numCol,sizeof(int)*numFluid);
cout<<"-------------------------cuda allocate done----------------------------------"<<endl;
//memory copy
cudaMemcpy(d_xPosition,xPosition,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_yPosition,yPosition,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_inPressure,inPressure,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_inVolume,inVolume,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_inVelocity,inVelocity,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_inSoundSpeed,inSoundSpeed,sizeof(double)*numParticle,cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbourlist0,neighbourlist0,sizeof(int)*numFluid*numNeighbourone,cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbourlist1,neighbourlist1,sizeof(int)*numFluid*numNeighbourone,cudaMemcpyHostToDevice);
cudaMemcpy(d_neighboursize0,neighboursize0,sizeof(int)*numFluid,cudaMemcpyHostToDevice);
cudaMemcpy(d_neighboursize1,neighboursize1,sizeof(int)*numFluid,cudaMemcpyHostToDevice);
cudaMemcpy(d_LPFOrder0,LPFOrder0,sizeof(int)*numFluid,cudaMemcpyHostToDevice);
cudaMemcpy(d_LPFOrder1,LPFOrder1,sizeof(int)*numFluid,cudaMemcpyHostToDevice);
cout<<"----------------------------mem allocate and copy done--------------------------------"<<endl;
//----------------OUTPUT-------------------------
/*
double* outVelocity = new double[numParticle];
double* outPressure = new double[numParticle];
double* outSoundSpeed = new double[numParticle];
double* outVolume = new double[numParticle];
*/
cout<<"--------------------------------Testing---------------------------------------"<<endl;
dim3 blocks(128,1);
dim3 threads(128,1);
computeRowandCol<<<blocks,threads>>>(d_neighboursize0,d_numRow,d_numCol,d_LPFOrder0,numFluid);
//build double device pointer A
double** A;
cudaMalloc((void**)&A,sizeof(double*)*numFluid);
double** A_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
cudaMalloc((void**)&A_temp[i],sizeof(double)*5*numNeighbourone);
}
cudaMemcpy(A, A_temp,sizeof(double*)*numFluid,cudaMemcpyHostToDevice);
//build distance array
double* d_distance;
cudaMalloc((void**)&d_distance,sizeof(double)*numFluid);
cout<<"------------------------------Testing2---------------------------"<<endl;
dim3 blocks1(128,1);
dim3 threads1(128,1);
computeA2D<<<blocks1,threads1>>>(d_neighbourlist0,d_LPFOrder0,d_numRow,d_xPosition,d_yPosition, numFluid,numNeighbourone,A,d_distance);
cout<<"-----------------------Testing Done------------------------------"<<endl;
/*
for(int i=0;i<numFluid;i++){
cout<<"A of number: "<<i<<endl;
magma_dprint_gpu(6,1,A_temp[i],6,queue_qr);
}
*/
//Process QR batched mode
magma_int_t m = 3;
magma_int_t n = 2;
magma_int_t lda = 3;
magma_int_t min_mn = min(m,n);
double **Tau;
cudaMalloc((void**)&Tau,numFluid*sizeof(double*));
double** Tau_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
cudaMalloc((void**)&Tau_temp[i],sizeof(double)*min_mn);
}
cudaMemcpy(Tau, Tau_temp, sizeof(double*)*numFluid, cudaMemcpyHostToDevice);
magma_int_t* info;
cudaMalloc((void**)&info,numFluid*sizeof(magma_int_t));
magma_int_t batchid = numFluid;
//Start QR
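    // Sketch of the expected LAPACK-style geqrf convention (not verified against this
    // MAGMA version): on exit each A[i] holds R in and above the diagonal and the
    // Householder reflector vectors below it, Tau[i] holds the reflector scalar factors,
    // and info[i] carries the per-matrix status code.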
magma_dgeqrf_batched(m,n,A,lda,Tau,info,batchid,queue_qr);
cout<<"-------------------------QR DONE----------------------------------"<<endl;
//build right hand side B
double** B;
cudaMalloc((void**)&B,sizeof(double*)*numFluid);
double** B_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
cudaMalloc((void**)&B_temp[i],sizeof(double)*numNeighbourone);
}
cudaMemcpy(B,B_temp,sizeof(double*)*numFluid,cudaMemcpyHostToDevice);
computeB<<<blocks,threads>>>(d_neighbourlist0, d_numRow, d_inPressure, numNeighbourone, numFluid, B);
/*
for(int i=0;i<numFluid;i++){
cout<<"number: "<<i<<endl;
magma_dprint_gpu(3,1,B_temp[i],3,queue_qr);
}
*/
//solver linear system
double **result;
cudaMalloc((void**)&result,numFluid*sizeof(double*));
double** result_temp = new double*[numFluid];
for(int i=0;i<numFluid;i++){
cudaMalloc((void**)&result_temp[i],sizeof(double)*min_mn);
}
cudaMemcpy(result, result_temp, sizeof(double*)*numFluid, cudaMemcpyHostToDevice);
computeLS<<<blocks,threads>>>(A,B,Tau,d_numRow,d_numCol,numFluid, result);
cout<<"BIG SUCCESS!!"<<endl;
for(int i=0;i<numFluid;i++){
cout<<"number: "<<i<<endl;
magma_dprint_gpu(2,1,result_temp[i],2,queue_qr);
}
magma_queue_destroy(queue_qr);
magma_finalize();
//release memory
delete[] inPressure;
delete[] inVolume;
delete[] inVelocity;
delete[] inSoundSpeed;
delete[] neighbourlist0;
delete[] neighbourlist1;
delete[] neighboursize0;
delete[] neighboursize1;
delete[] LPFOrder0;
delete[] LPFOrder1;
    delete[] xPosition;
    delete[] yPosition;
cudaFree(d_neighboursize0);
cudaFree(d_neighboursize1);
cudaFree(d_neighbourlist0);
cudaFree(d_neighbourlist1);
cudaFree(d_LPFOrder0);
cudaFree(d_LPFOrder1);
cudaFree(d_inPressure);
cudaFree(d_inVolume);
cudaFree(d_inSoundSpeed);
cudaFree(d_inVelocity);
cudaFree(d_numRow);
cudaFree(d_numCol);
cudaFree(A);
cudaFree(result);
    // only numFluid per-matrix buffers were allocated; free them before deleting the
    // host pointer arrays
    for(int i=0;i<numFluid;i++){
        cudaFree(A_temp[i]);
        cudaFree(B_temp[i]);
        cudaFree(result_temp[i]);
    }
    delete[] A_temp;
    delete[] B_temp;
    delete[] result_temp;
cudaFree(d_distance);
cudaFree(B);
// QR
    for(int i=0;i<numFluid;i++){
        cudaFree(Tau_temp[i]);
    }
    delete[] Tau_temp;
    cudaFree(Tau);
cudaFree(info);
}
|
6a48f053a5b75e17dea98222ce0e8f7b7178e8a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "darknet.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <float.h>
#include "activations.h"
#include "dark_cuda.h"
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float relu6_activate_kernel(float x) { return min_val_cmp(max_val_cmp(x, 0), 6); }
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float gelu_activate_kernel(float x){return (0.5*x*(1 + tanhf(0.797885*x + 0.035677*powf(x, 3))));}
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float relu6_gradient_kernel(float x) { return (x > 0 && x < 6); }
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float sech_gpu(float x) { return 2 / (expf(x) + expf(-x)); }
__device__ float gelu_gradient_kernel(float x) {
const float x3 = powf(x, 3);
return 0.5*tanhf(0.0356774*x3 + 0.797885*x) + (0.0535161*x3 + 0.398942*x) * powf(sech_gpu(0.0356774*x3 + 0.797885*x), 2) + 0.5;
}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case RELU6:
return relu6_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case GELU:
return gelu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case RELU6:
return relu6_gradient_kernel(x);
case NORM_CHAN:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case GELU:
return gelu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
if (output_sigmoid_gpu) output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__device__ float mish_njuffa(float x)
{
float r;
float e = expf(x);
r = 1.0f / fmaf(fmaf(-0.5f, e, -1.0f), e, -1.0f);
r = fmaf(r, x, x);
return r;
}
__device__ float mish_yashas(float x)
{
float e = __expf(x);
if (x <= -18.0f)
return x * e;
float n = e * e + 2 * e;
if (x <= -5.0f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
// https://github.com/digantamisra98/Mish
__global__ void activate_array_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20;
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
//output_gpu[i] = x_val * tanh_activate_kernel(logf(1 + expf(x_val)));
// Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L17-L20
// TF: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L40-L49
// log1p(x) == log(x + 1)
//output_gpu[i] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
output_gpu[i] = mish_yashas(x_val);
//output_gpu[i] = mish_njuffa(x_val);
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_gelu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = gelu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu6_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu6_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
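// Derivation (for reference): with y = x*sigmoid(x) stored as the layer output,
// dy/dx = sigmoid(x) + x*sigmoid(x)*(1 - sigmoid(x)) = y + sigmoid(x)*(1 - y),
// which is the "swish + sigmoid_gpu[i] * (1 - swish)" expression used below.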
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
// https://github.com/digantamisra98/Mish
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_gelu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= gelu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu6_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu6_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if(a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU6) activate_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == GELU) activate_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), x, n, a);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU6) gradient_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
//else if (a == NORM_CHAN) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == NORM_CHAN_SOFTMAX || a == NORM_CHAN) {
printf(" Error: should be used custom NORM_CHAN_SOFTMAX-function for gradient \n");
exit(0);
}
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == GELU) gradient_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
int k;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) sum += val;
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) val = val / sum;
else val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
float max_val = -FLT_MAX;
int k;
if (use_max_val) {
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > max_val || k == 0) max_val = val;
}
}
else
max_val = 0;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val - max_val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val - max_val) / sum;
if (isnan(val) || isinf(val)) val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_softmax_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu, use_max_val);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float delta = delta_gpu[index];
float grad = x[index] * (1 - x[index]);
delta = delta * grad;
if (isnan(delta) || isinf(delta)) delta = 0;
delta_gpu[index] = delta;
}
}
}
extern "C" void gradient_array_normalize_channels_softmax_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
if (x[index] > 0) {
float delta = delta_gpu[index];
float grad = x[index];
delta = delta * grad;
delta_gpu[index] = delta;
}
}
}
}
extern "C" void gradient_array_normalize_channels_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
} | 6a48f053a5b75e17dea98222ce0e8f7b7178e8a7.cu | #include "darknet.h"
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include <float.h>
#include "activations.h"
#include "dark_cuda.h"
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float relu6_activate_kernel(float x) { return min_val_cmp(max_val_cmp(x, 0), 6); }
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float gelu_activate_kernel(float x){return (0.5*x*(1 + tanhf(0.797885*x + 0.035677*powf(x, 3))));}
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float relu6_gradient_kernel(float x) { return (x > 0 && x < 6); }
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float sech_gpu(float x) { return 2 / (expf(x) + expf(-x)); }
__device__ float gelu_gradient_kernel(float x) {
const float x3 = powf(x, 3);
return 0.5*tanhf(0.0356774*x3 + 0.797885*x) + (0.0535161*x3 + 0.398942*x) * powf(sech_gpu(0.0356774*x3 + 0.797885*x), 2) + 0.5;
}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case RELU6:
return relu6_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case GELU:
return gelu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case RELU6:
return relu6_gradient_kernel(x);
case NORM_CHAN:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case GELU:
return gelu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
if (output_sigmoid_gpu) output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__device__ float mish_njuffa(float x)
{
float r;
float e = expf(x);
r = 1.0f / fmaf(fmaf(-0.5f, e, -1.0f), e, -1.0f);
r = fmaf(r, x, x);
return r;
}
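// Identity behind mish_yashas (for reference): mish(x) = x*tanh(softplus(x)); writing
// n = e^(2x) + 2e^x gives tanh(log(1 + e^x)) = n/(n + 2), so mish(x) = x*n/(n + 2)
// = x - 2x/(n + 2). The branches for very negative x switch to asymptotic forms to
// avoid loss of precision in float arithmetic.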
__device__ float mish_yashas(float x)
{
float e = __expf(x);
if (x <= -18.0f)
return x * e;
float n = e * e + 2 * e;
if (x <= -5.0f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
// https://github.com/digantamisra98/Mish
__global__ void activate_array_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20;
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
//output_gpu[i] = x_val * tanh_activate_kernel(logf(1 + expf(x_val)));
// Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L17-L20
// TF: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L40-L49
// log1p(x) == log(x + 1)
//output_gpu[i] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
output_gpu[i] = mish_yashas(x_val);
//output_gpu[i] = mish_njuffa(x_val);
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_gelu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = gelu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu6_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu6_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
// https://github.com/digantamisra98/Mish
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_gelu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= gelu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu6_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu6_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if(a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU6) activate_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == GELU) activate_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU6) gradient_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
//else if (a == NORM_CHAN) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == NORM_CHAN_SOFTMAX || a == NORM_CHAN) {
printf(" Error: should be used custom NORM_CHAN_SOFTMAX-function for gradient \n");
exit(0);
}
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == GELU) gradient_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
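// Normalizes activations across channels at each spatial location: positive values are
// divided by the sum of the positive channel values (+eps); non-positive values are zeroed.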
__global__ void activate_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
int k;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) sum += val;
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) val = val / sum;
else val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
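// Channel-wise softmax at each spatial location; if use_max_val is set, the per-location
// channel maximum is subtracted before exponentiation for numerical stability.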
__global__ void activate_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
float max_val = -FLT_MAX;
int k;
if (use_max_val) {
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > max_val || k == 0) max_val = val;
}
}
else
max_val = 0;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val - max_val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val - max_val) / sum;
if (isnan(val) || isinf(val)) val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_softmax_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu, use_max_val);
CHECK_CUDA(cudaPeekAtLastError());
}
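// Backward pass for the channel softmax: scales each delta by the diagonal softmax
// derivative x*(1-x) and clamps NaN/Inf results to zero.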
__global__ void gradient_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float delta = delta_gpu[index];
float grad = x[index] * (1 - x[index]);
delta = delta * grad;
if (isnan(delta) || isinf(delta)) delta = 0;
delta_gpu[index] = delta;
}
}
}
extern "C" void gradient_array_normalize_channels_softmax_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
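// Backward pass for the channel normalization: for positive outputs the delta is scaled
// by the output value; deltas at non-positive outputs are left unchanged.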
__global__ void gradient_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
if (x[index] > 0) {
float delta = delta_gpu[index];
float grad = x[index];
delta = delta * grad;
delta_gpu[index] = delta;
}
}
}
}
extern "C" void gradient_array_normalize_channels_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
} |
fc789696debe488ac46f19ec45a8b0a4fa55cb9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include <stdio.h>
#include "lb.h"
}
#include "cuda_utils.hpp"
//static const float c_sound_sq = 1.0f/3.0f;
static __constant__ float c_sound_sq = 1.0f/3.0f;
static size_t size_of_rho_v;
static size_t size_of_rho_v_pi;
static float *devNodesA = NULL;
static float *devNodesB = NULL;
static float *devBoundaryForces = NULL;
static LB_rho_v *devRhoV = NULL;
static LB_rho_v_pi *devRhoVpi = NULL;
static LB_rho_v_pi *print_rho_v_pi = NULL;
static unsigned int intflag = 1;
unsigned int *devBoundaryMap = NULL;
float *devExtForces = NULL;
float *devBoundaryVelocities = NULL;
float *devCurrentNodes = NULL;
float *hostExtForces = NULL;
//static __device__ __constant__ LBparameters d_LBparams;
__constant__ LBparameters d_LBparams;
LBparameters h_LBparams = {
// agrid tau rho
1.0f, 1.0f, 1.0f,
// gammaShear gammaBulk gammaOdd gammaEven
1.0f, 1.0f, 0.0f, 0.0f,
// dimX dimY dimZ numNodes
0u, 0u, 0u, 0u,
// numBoundaries boundaryVelocity
0u, {0.0f, 0.0f, 0.0f},
// extForceFlag extForceDensity
0u, {0.0f, 0.0f, 0.0f}
};
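// Decomposes a flat node index into lattice coordinates (x fastest, then y, then z).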
__device__ void index_to_xyz (unsigned int index, unsigned int *xyz) {
xyz[0] = index % d_LBparams.dimX;
index /= d_LBparams.dimX;
xyz[1] = index % d_LBparams.dimY;
index /= d_LBparams.dimY;
xyz[2] = index;
}
__device__ void calc_m_from_n (unsigned int index, float *n_a, float *mode) {
// The following convention is used:
  // The $\hat{c}_i$ from B. Duenweg's paper are given by:
/* c_0 = { 0, 0, 0}
c_1 = { 1, 0, 0}
c_2 = {-1, 0, 0}
c_3 = { 0, 1, 0}
c_4 = { 0,-1, 0}
c_5 = { 0, 0, 1}
c_6 = { 0, 0,-1}
c_7 = { 1, 1, 0}
c_8 = {-1,-1, 0}
c_9 = { 1,-1, 0}
c_10 = {-1, 1, 0}
c_11 = { 1, 0, 1}
c_12 = {-1, 0,-1}
c_13 = { 1, 0,-1}
c_14 = {-1, 0, 1}
c_15 = { 0, 1, 1}
c_16 = { 0,-1,-1}
c_17 = { 0, 1,-1}
c_18 = { 0,-1, 1} */
// The basis vectors (modes) are constructed as follows
// $m_k = \sum_{i} e_{ki} n_{i}$, where the $e_{ki}$ form a
// linear transformation (matrix) that is given by
/* $e{ 0,i} = 1$
$e{ 1,i} = c_{i,x}$
$e{ 2,i} = c_{i,y}$
$e{ 3,i} = c_{i,z}$
$e{ 4,i} = c_{i}^2 - 1$
$e{ 5,i} = c_{i,x}^2 - c_{i,y}^2$
$e{ 6,i} = c_{i}^2 - 3*c_{i,z}^2$
$e{ 7,i} = c_{i,x}*c_{i,y}$
$e{ 8,i} = c_{i,x}*c_{i,z}$
$e{ 9,i} = c_{i,y}*c_{i,z}$
$e{10,i} = (3*c_{i}^2 - 5)*c_{i,x}$
$e{11,i} = (3*c_{i}^2 - 5)*c_{i,y}$
$e{12,i} = (3*c_{i}^2 - 5)*c_{i,z}$
$e{13,i} = (c_{i,y}^2 - c_{i,z}^2)*c_{i,x}$
$e{14,i} = (c_{i,x}^2 - c_{i,z}^2)*c_{i,y}$
$e{15,i} = (c_{i,x}^2 - c_{i,y}^2)*c_{i,z}$
$e{16,i} = 3*c_{i}^2^2 - 6*c_{i}^2 + 1$
$e{17,i} = (2*c_{i}^2 - 3)*(c_{i,x}^2 - c_{i,y}^2)$
$e{18,i} = (2*c_{i}^2 - 3)*(c_{i}^2 - 3*c_{i,z}^2)$ */
// Such that the transformation matrix is given by
/* {{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0, 1,-1, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1, 0, 0, 0, 0},
{ 0, 0, 0, 1,-1, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0, 1,-1, 1,-1},
{ 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1},
{-1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0, 1, 1,-1,-1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1},
{ 0, 1, 1, 1, 1,-2,-2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1},
{ 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1},
{ 0,-2, 2, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1, 0, 0, 0, 0},
{ 0, 0, 0,-2, 2, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0, 1,-1, 1,-1},
{ 0, 0, 0, 0, 0,-2, 2, 0, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1},
{ 0, 0, 0, 0, 0, 0, 0, 1,-1, 1,-1,-1, 1,-1, 1, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0,-1, 1,-1, 1},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1,-1, 1, 1,-1},
{ 1,-2,-2,-2,-2,-2,-2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0,-1,-1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1},
{ 0,-1,-1,-1,-1, 2, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1}} */
// With weights
/* q^{c_{i}} = { 1/3, 1/18, 1/18, 1/18,
1/18, 1/18, 1/18, 1/36,
1/36, 1/36, 1/36, 1/36,
1/36, 1/36, 1/36, 1/36,
1/36, 1/36, 1/36 } */
// Which makes the transformation satisfy the following
// orthogonality condition:
// \sum_{i} q^{c_{i}} e_{ki} e_{li} = w_{k} \delta_{kl},
// where the weights are:
/* w_{i} = { 1, 1/3, 1/3, 1/3,
2/3, 4/9, 4/3, 1/9,
1/9, 1/9, 2/3, 2/3,
2/3, 2/9, 2/9, 2/9,
2, 4/9, 4/3 } */
// mass mode
mode[0] = n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index]
+ n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index]
+ n_a[ 5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index];
// momentum modes
mode[1] = (n_a[ 1 * d_LBparams.numNodes + index] - n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[2] = (n_a[ 3 * d_LBparams.numNodes + index] - n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[3] = (n_a[ 5 * d_LBparams.numNodes + index] - n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
// stress modes
mode[4] = - n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index];
mode[5] = (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
- (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
mode[6] = (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index])
- 2.0f*( (n_a[5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index])
- (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]));
mode[7] = (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]);
mode[8] = (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]);
mode[9] = (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
// kinetic modes
mode[10] = - 2.0f*(n_a[ 1 * d_LBparams.numNodes + index] - n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[11] = - 2.0f*(n_a[ 3 * d_LBparams.numNodes + index] - n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[12] = - 2.0f*(n_a[ 5 * d_LBparams.numNodes + index] - n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[13] = (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[14] = (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[15] = (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[16] = n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]
- 2.0f*( (n_a[1 * d_LBparams.numNodes + index] + n_a[2 * d_LBparams.numNodes + index])
+ (n_a[3 * d_LBparams.numNodes + index] + n_a[4 * d_LBparams.numNodes + index])
+ (n_a[5 * d_LBparams.numNodes + index] + n_a[6 * d_LBparams.numNodes + index]));
mode[17] = - (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
mode[18] = - (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
- (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index])
+ 2.0f*( (n_a[5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]));
}
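// Computes the local density and velocity from the modes (including one half-step of the
// external force) and stores them in d_v.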
__device__ void update_rho_v (unsigned int index, float *mode, float *ext_forces, LB_rho_v *d_v) {
float Rho_tot = 0.0f;
float u_tot[3] = {0.0f,0.0f,0.0f};
// Note:
// Remember that the populations are stored as differences to their equilibrium values.
// Quantities are calculated in LB units rather than MD units (cf. ESPResSo)
//d_v[index].rho[ii] = mode[0 +ii*LBQ] + para.rho[ii]*para.agrid*para.agrid*para.agrid;
//Rho_tot += mode[0+ii*LBQ] + para.rho[ii]*para.agrid*para.agrid*para.agrid;
d_v[index].rho = mode[0] + d_LBparams.rho;
Rho_tot += mode[0] + d_LBparams.rho;
u_tot[0] += mode[1];
u_tot[1] += mode[2];
u_tot[2] += mode[3];
/** if forces are present, the momentum density is redefined to
   * include one half-step of the force action. See the
* Chapman-Enskog expansion in [Ladd & Verberg]. */
u_tot[0] += 0.5f*ext_forces[0*d_LBparams.numNodes + index];
u_tot[1] += 0.5f*ext_forces[1*d_LBparams.numNodes + index];
u_tot[2] += 0.5f*ext_forces[2*d_LBparams.numNodes + index];
u_tot[0] /= Rho_tot;
u_tot[1] /= Rho_tot;
u_tot[2] /= Rho_tot;
d_v[index].v[0] = u_tot[0];
d_v[index].v[1] = u_tot[1];
d_v[index].v[2] = u_tot[2];
}
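// Collision step: relaxes the stress modes towards their equilibrium values (gammaBulk /
// gammaShear) and damps the ghost modes (gammaOdd / gammaEven).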
__device__ void relax_modes (unsigned int index, float *ext_forces, LB_rho_v *d_v, float *mode) {
float Rho;
float j[3];
float modes_from_pi_eq[6];
float u_tot[3] = {0.0f,0.0f,0.0f};
update_rho_v (index, mode, ext_forces, d_v);
Rho = mode[0] + d_LBparams.rho;
  float inv_Rho = 1.0f / Rho;
u_tot[0] = d_v[index].v[0];
u_tot[1] = d_v[index].v[1];
u_tot[2] = d_v[index].v[2];
j[0] = Rho * u_tot[0];
j[1] = Rho * u_tot[1];
j[2] = Rho * u_tot[2];
/** equilibrium part of the stress modes (eq13 schiller)*/
modes_from_pi_eq[0] = ((j[0]*j[0])+(j[1]*j[1])+(j[2]*j[2])) * inv_Rho;
modes_from_pi_eq[1] = ((j[0]*j[0])-(j[1]*j[1])) * inv_Rho;
modes_from_pi_eq[2] = (((j[0]*j[0])+(j[1]*j[1])+(j[2]*j[2])) - 3.0f*(j[2]*j[2])) * inv_Rho;
modes_from_pi_eq[3] = j[0]*j[1] * inv_Rho;
modes_from_pi_eq[4] = j[0]*j[2] * inv_Rho;
modes_from_pi_eq[5] = j[1]*j[2] * inv_Rho;
/** relax the stress modes (eq14 schiller)*/
mode[4] = modes_from_pi_eq[0] + d_LBparams.gammaBulk * (mode[4] - modes_from_pi_eq[0]);
mode[5] = modes_from_pi_eq[1] + d_LBparams.gammaShear * (mode[5] - modes_from_pi_eq[1]);
mode[6] = modes_from_pi_eq[2] + d_LBparams.gammaShear * (mode[6] - modes_from_pi_eq[2]);
mode[7] = modes_from_pi_eq[3] + d_LBparams.gammaShear * (mode[7] - modes_from_pi_eq[3]);
mode[8] = modes_from_pi_eq[4] + d_LBparams.gammaShear * (mode[8] - modes_from_pi_eq[4]);
mode[9] = modes_from_pi_eq[5] + d_LBparams.gammaShear * (mode[9] - modes_from_pi_eq[5]);
/** relax the ghost modes (project them out) */
/** ghost modes have no equilibrium part due to orthogonality */
mode[10] = d_LBparams.gammaOdd*mode[10];
mode[11] = d_LBparams.gammaOdd*mode[11];
mode[12] = d_LBparams.gammaOdd*mode[12];
mode[13] = d_LBparams.gammaOdd*mode[13];
mode[14] = d_LBparams.gammaOdd*mode[14];
mode[15] = d_LBparams.gammaOdd*mode[15];
mode[16] = d_LBparams.gammaEven*mode[16];
mode[17] = d_LBparams.gammaEven*mode[17];
mode[18] = d_LBparams.gammaEven*mode[18];
}
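// Resets the per-node body force to the constant external force density.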
__device__ void reset_LB_forces (unsigned int index, float *ext_forces) {
ext_forces[ index] = d_LBparams.extForceDensity[0];
ext_forces[ d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
}
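// Adds the external force contribution to the momentum modes and the corresponding
// correction terms to the stress modes.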
__device__ void apply_forces (unsigned int index, float *ext_forces, LB_rho_v *d_v, float *mode) {
float u[3] = {0.0f, 0.0f, 0.0f},
C[6] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
// Note: the values d_v were calculated in relax_modes()
u[0] = d_v[index].v[0];
u[1] = d_v[index].v[1];
u[2] = d_v[index].v[2];
C[0] += (1.0f + d_LBparams.gammaBulk) * u[0]*ext_forces[0*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[2] += (1.0f + d_LBparams.gammaBulk) * u[1]*ext_forces[1*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[5] += (1.0f + d_LBparams.gammaBulk) * u[2]*ext_forces[2*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[1] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[0]*ext_forces[1*d_LBparams.numNodes + index] +
u[1]*ext_forces[0*d_LBparams.numNodes + index]);
C[3] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[0]*ext_forces[2*d_LBparams.numNodes + index] +
u[2]*ext_forces[0*d_LBparams.numNodes + index]);
C[4] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[1]*ext_forces[2*d_LBparams.numNodes + index] +
u[2]*ext_forces[1*d_LBparams.numNodes + index]);
/** update momentum modes */
mode[1] += ext_forces[0*d_LBparams.numNodes + index];
mode[2] += ext_forces[1*d_LBparams.numNodes + index];
mode[3] += ext_forces[2*d_LBparams.numNodes + index];
/** update stress modes */
mode[4] += C[0] + C[2] + C[5];
mode[5] += C[0] - C[2];
mode[6] += C[0] + C[2] - 2.0f*C[5];
mode[7] += C[1];
mode[8] += C[3];
mode[9] += C[4];
// Note: Body forces are reset in coupling.cu
//reset_LB_forces (index, ext_forces);
}
__device__ void normalize_modes (float* mode) {
/** normalization factors enter in the back transformation */
mode[ 0] *= 1.0f;
mode[ 1] *= 3.0f;
mode[ 2] *= 3.0f;
mode[ 3] *= 3.0f;
mode[ 4] *= 3.0f/2.0f;
mode[ 5] *= 9.0f/4.0f;
mode[ 6] *= 3.0f/4.0f;
mode[ 7] *= 9.0f;
mode[ 8] *= 9.0f;
mode[ 9] *= 9.0f;
mode[10] *= 3.0f/2.0f;
mode[11] *= 3.0f/2.0f;
mode[12] *= 3.0f/2.0f;
mode[13] *= 9.0f/2.0f;
mode[14] *= 9.0f/2.0f;
mode[15] *= 9.0f/2.0f;
mode[16] *= 1.0f/2.0f;
mode[17] *= 9.0f/4.0f;
mode[18] *= 3.0f/4.0f;
}
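// Back-transforms the modes to populations and pushes (streams) them to the neighboring
// nodes with periodic wrap-around.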
__device__ void calc_n_from_modes_push (unsigned int index, float *mode, float *n_b) {
unsigned int xyz[3];
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
n_b[0*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/3.0f * (mode[0] - mode[4] + mode[16]);
n_b[1*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] + mode[1] + mode[5] + mode[6] - mode[17] - mode[18] -2.0f*(mode[10] + mode[16]));
n_b[2*d_LBparams.numNodes + (d_LBparams.dimX + x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] - mode[1] + mode[5] + mode[6] - mode[17] - mode[18] + 2.0f*(mode[10] - mode[16]));
n_b[3*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
    1.0f/18.0f * (mode[0] + mode[2] - mode[5] + mode[6] + mode[17] - mode[18] - 2.0f*(mode[11] + mode[16]));
n_b[4*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] - mode[2] - mode[5] + mode[6] + mode[17] - mode[18] + 2.0f*(mode[11] - mode[16]));
n_b[5*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/18.0f * (mode[0] + mode[3] - 2.0f*(mode[6] + mode[12] + mode[16] - mode[18]));
n_b[6*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/18.0f * (mode[0] - mode[3] - 2.0f*(mode[6] - mode[12] + mode[16] - mode[18]));
n_b[7*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] + mode[1] + mode[2] + mode[4] + 2.0f*mode[6] + mode[7] + mode[10] + mode[11] + mode[13] + mode[14] + mode[16] + 2.0f*mode[18]);
n_b[8*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] - mode[1] - mode[2] + mode[4] + 2.0f*mode[6] + mode[7] - mode[10] - mode[11] - mode[13] - mode[14] + mode[16] +
2.0f*mode[18]);
n_b[9*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] + mode[1] - mode[2] + mode[4] + 2.0f*mode[6] - mode[7] + mode[10] - mode[11] + mode[13] - mode[14] + mode[16] + 2.0f*mode[18]);
n_b[10*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] - mode[1] + mode[2] + mode[4] + 2.0f*mode[6] - mode[7] - mode[10] + mode[11] - mode[13] + mode[14] + mode[16] + 2.0f*mode[18]);
n_b[11*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[1] + mode[3] + mode[4] + mode[5] - mode[6] + mode[8] + mode[10] + mode[12] - mode[13] + mode[15] + mode[16] + mode[17] - mode[18]);
n_b[12*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[1] - mode[3] + mode[4] + mode[5] - mode[6] + mode[8] - mode[10] - mode[12] + mode[13] - mode[15] + mode[16] + mode[17] - mode[18]);
n_b[13*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[1] - mode[3] + mode[4] + mode[5] - mode[6] - mode[8] + mode[10] - mode[12] - mode[13] - mode[15] + mode[16] + mode[17] - mode[18]);
n_b[14*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[1] + mode[3] + mode[4] + mode[5] - mode[6] - mode[8] - mode[10] + mode[12] + mode[13] + mode[15] + mode[16] + mode[17] - mode[18]);
n_b[15*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[2] + mode[3] + mode[4] - mode[5] - mode[6] + mode[9] + mode[11] + mode[12] - mode[14] - mode[15] + mode[16] - mode[17] - mode[18]);
n_b[16*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[2] - mode[3] + mode[4] - mode[5] - mode[6] + mode[9] - mode[11] - mode[12] + mode[14] + mode[15] + mode[16] - mode[17] - mode[18]);
n_b[17*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
    1.0f/36.0f * (mode[0] + mode[2] - mode[3] + mode[4] - mode[5] - mode[6] - mode[9] + mode[11] - mode[12] - mode[14] + mode[15] + mode[16] - mode[17] - mode[18]);
n_b[18*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[2] + mode[3] + mode[4] - mode[5] - mode[6] - mode[9] - mode[11] + mode[12] + mode[14] - mode[15] + mode[16] - mode[17] - mode[18]);
}
// Note: suffix f
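// Applies the bounce-back rule at boundary nodes, including the moving-wall correction
// term (shift), and accumulates the momentum transfer onto the boundary forces via atomicAdd.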
__device__ void bounce_back_boundaries (unsigned int index, unsigned int *boundary_map, float *boundary_velocities, float *n_curr, float *devBoundaryForces) {
unsigned int boundaryIndex;
float v[3];
unsigned int xyz[3];
float shift;
float weight;
int c[3];
float pop_to_bounce_back;
unsigned int population;
size_t to_index, to_index_x, to_index_y, to_index_z;
float boundary_force[3] = {0.0f,0.0f,0.0f};
unsigned int inverse;
boundaryIndex = boundary_map[index];
if (boundaryIndex != 0)
{
// Version 1: can assign a velocity value to each boundary
//v[0] = boundary_velocities[(boundaryIndex-1)*3 + 0];
//v[1] = boundary_velocities[(boundaryIndex-1)*3 + 1];
//v[2] = boundary_velocities[(boundaryIndex-1)*3 + 2];
// Version 2: only allow walls in the y direction to move
if (boundaryIndex == 1) {
v[0] = -0.5f * d_LBparams.boundaryVelocity[0];
v[1] = 0.0f;
v[2] = 0.0f;
} else if (boundaryIndex == 2) {
v[0] = 0.5f * d_LBparams.boundaryVelocity[0];
v[1] = 0.0f;
v[2] = 0.0f;
} else {
v[0] = 0.0f;
v[1] = 0.0f;
v[2] = 0.0f;
}
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
// TODO : PUT IN EQUILIBRIUM CONTRIBUTION TO THE BOUNCE-BACK DENSITY FOR THE BOUNDARY FORCE
// TODO : INITIALIZE BOUNDARY FORCE PROPERLY, HAS NONZERO ELEMENTS IN FIRST STEP
// TODO : SET INTERNAL BOUNDARY NODE VALUES TO ZERO
// Note:
    // I have followed ESPResSo 4.1.2 and modified some of the following code.
    // I am still not sure whether those prefactors, agrid and tau, should appear or not!
    // The following macro just serves as a text replacement!
#define BOUNCEBACK() \
shift = 2.0f * weight * d_LBparams.rho * (v[0]*c[0]+v[1]*c[1]+v[2]*c[2]) * 3.0f / d_LBparams.agrid * d_LBparams.tau; \
pop_to_bounce_back = n_curr[population*d_LBparams.numNodes + index]; \
to_index_x = (x+c[0]+d_LBparams.dimX) % d_LBparams.dimX; \
to_index_y = (y+c[1]+d_LBparams.dimY) % d_LBparams.dimY; \
to_index_z = (z+c[2]+d_LBparams.dimZ) % d_LBparams.dimZ; \
to_index = to_index_x + d_LBparams.dimX*to_index_y + d_LBparams.dimX*d_LBparams.dimY*to_index_z; \
if (boundary_map[to_index] == 0) { \
boundary_force[0] += (2.0f * pop_to_bounce_back + shift) * c[0]; \
boundary_force[1] += (2.0f * pop_to_bounce_back + shift) * c[1]; \
boundary_force[2] += (2.0f * pop_to_bounce_back + shift) * c[2]; \
n_curr[inverse*d_LBparams.numNodes + to_index] = pop_to_bounce_back + shift; \
}
// Note:
// to_index: destination node
// A minus sign is absorbed into the population velocity c, so the term pop_to_bounce_back + shift
// appears in the code above rather than pop_to_bounce_back - shift
// the resting population does nothing, i.e., population 0.
c[0]= 1;c[1]= 0;c[2]= 0; weight=1./18.; population= 2; inverse= 1;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]= 0; weight=1./18.; population= 1; inverse= 2;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]= 0; weight=1./18.; population= 4; inverse= 3;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]= 0; weight=1./18.; population= 3; inverse= 4;
BOUNCEBACK();
c[0]= 0;c[1]= 0;c[2]= 1; weight=1./18.; population= 6; inverse= 5;
BOUNCEBACK();
c[0]= 0;c[1]= 0;c[2]=-1; weight=1./18.; population= 5; inverse= 6;
BOUNCEBACK();
c[0]= 1;c[1]= 1;c[2]= 0; weight=1./36.; population= 8; inverse= 7;
BOUNCEBACK();
c[0]=-1;c[1]=-1;c[2]= 0; weight=1./36.; population= 7; inverse= 8;
BOUNCEBACK();
c[0]= 1;c[1]=-1;c[2]= 0; weight=1./36.; population=10; inverse= 9;
BOUNCEBACK();
c[0]=-1;c[1]= 1;c[2]= 0; weight=1./36.; population= 9; inverse=10;
BOUNCEBACK();
c[0]= 1;c[1]= 0;c[2]= 1; weight=1./36.; population=12; inverse=11;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]=-1; weight=1./36.; population=11; inverse=12;
BOUNCEBACK();
c[0]= 1;c[1]= 0;c[2]=-1; weight=1./36.; population=14; inverse=13;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]= 1; weight=1./36.; population=13; inverse=14;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]= 1; weight=1./36.; population=16; inverse=15;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]=-1; weight=1./36.; population=15; inverse=16;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]=-1; weight=1./36.; population=18; inverse=17;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]= 1; weight=1./36.; population=17; inverse=18;
BOUNCEBACK();
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 0], boundary_force[0]);
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 1], boundary_force[1]);
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 2], boundary_force[2]);
}
}
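// Writes density, velocity, and the pre-/post-collision averaged pressure tensor of a
// fluid node into the print buffer; boundary nodes are written as zeros.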
__device__ void calc_values_in_LB_units (unsigned int index, unsigned int print_index, unsigned int *boundary_map, float *mode, float *ext_forces, LB_rho_v *d_v, LB_rho_v_pi *d_p_v) {
float j[3];
float modes_from_pi_eq[6];
float pi[6]={0.0f,0.0f,0.0f,0.0f,0.0f,0.0f};
if (boundary_map[index] == 0) {
/* Ensure we are working with the current values of d_v */
update_rho_v (index, mode, ext_forces, d_v);
d_p_v[print_index].rho = d_v[index].rho;
d_p_v[print_index].v[0] = d_v[index].v[0];
d_p_v[print_index].v[1] = d_v[index].v[1];
d_p_v[print_index].v[2] = d_v[index].v[2];
/* stress calculation */
float Rho = d_v[index].rho;
    float inv_Rho = 1.0f / Rho;
/* note that d_v[index].v[] already includes the 1/2 f term, accounting for the pre- and post-collisional average */
j[0] = Rho * d_v[index].v[0];
j[1] = Rho * d_v[index].v[1];
j[2] = Rho * d_v[index].v[2];
// equilibrium part of the stress modes, which comes from
// the equality between modes and stress tensor components
/* m4 = trace(pi) - rho
m5 = pi_xx - pi_yy
m6 = trace(pi) - 3 pi_zz
m7 = pi_xy
m8 = pi_xz
m9 = pi_yz */
  // and plugging in the Euler stress for the equilibrium:
// pi_eq = rho_0*c_s^2*I3 + (j \otimes j)/rho
// with I3 the 3D identity matrix and
// rho = \trace(rho_0*c_s^2*I3), which yields
/* m4_from_pi_eq = j.j
m5_from_pi_eq = j_x*j_x - j_y*j_y
m6_from_pi_eq = j.j - 3*j_z*j_z
m7_from_pi_eq = j_x*j_y
m8_from_pi_eq = j_x*j_z
m9_from_pi_eq = j_y*j_z */
// where the / Rho term has been dropped. We thus obtain:
modes_from_pi_eq[0] = (j[0]*j[0] + j[1]*j[1] + j[2]*j[2] ) * inv_Rho;
modes_from_pi_eq[1] = (j[0]*j[0] - j[1]*j[1] ) * inv_Rho;
modes_from_pi_eq[2] = (j[0]*j[0] + j[1]*j[1] + j[2]*j[2] - 3.0*j[2]*j[2]) * inv_Rho;
modes_from_pi_eq[3] = j[0]*j[1] * inv_Rho;
modes_from_pi_eq[4] = j[0]*j[2] * inv_Rho;
modes_from_pi_eq[5] = j[1]*j[2] * inv_Rho;
/* Now we must predict the outcome of the next collision */
/* We immediately average pre- and post-collision. */
/* TODO: need a reference for this. */
mode[4] = modes_from_pi_eq[0] + (0.5 + 0.5*d_LBparams.gammaBulk) * (mode[4] - modes_from_pi_eq[0]);
mode[5] = modes_from_pi_eq[1] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[5] - modes_from_pi_eq[1]);
mode[6] = modes_from_pi_eq[2] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[6] - modes_from_pi_eq[2]);
mode[7] = modes_from_pi_eq[3] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[7] - modes_from_pi_eq[3]);
mode[8] = modes_from_pi_eq[4] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[8] - modes_from_pi_eq[4]);
mode[9] = modes_from_pi_eq[5] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[9] - modes_from_pi_eq[5]);
// Transform the stress tensor components according to the modes that
// correspond to those used by U. Schiller. In terms of populations this
// expression then corresponds exactly to those in Eqs. 116 - 121 in the
// Duenweg and Ladd paper, when these are written out in populations.
// But to ensure this, the expression in Schiller's modes has to be different!
pi[0] += (2.0f*(mode[0] + mode[4]) + mode[6] + 3.0f*mode[5]) / 6.0f; // xx
pi[1] += mode[7]; // xy
pi[2] += (2.0f*(mode[0] + mode[4]) + mode[6] - 3.0f*mode[5]) / 6.0f; // yy
pi[3] += mode[8]; // xz
pi[4] += mode[9]; // yz
pi[5] += (mode[0] + mode[4] - mode[6]) / 3.0f; // zz
for(int i=0; i < 6; i++) {
d_p_v[print_index].pi[i] = pi[i];
}
} else {
d_p_v[print_index].rho = 0.0f;
for (int i=0; i < 3; i++)
d_p_v[print_index].v[i] = 0.0f;
for(int i=0; i < 6; i++)
d_p_v[print_index].pi[i] = 0.0f;
}
}
__global__ void Print_dev_LB_Params() {
printf("\nDim_x Dim_y Dim_z Num_of_nodes\n");
printf("%u %u %u %u\n",d_LBparams.dimX, d_LBparams.dimY, d_LBparams.dimZ, d_LBparams.numNodes);
printf("Num_of_boundaries Boundary_vel.x Boundary_vel.y Boundary_vel.z\n");
printf("%u %f %f %f\n",
d_LBparams.numBoundaries, d_LBparams.boundaryVelocity[0], d_LBparams.boundaryVelocity[1], d_LBparams.boundaryVelocity[2]);
printf("Ext_force_flag Ext_force_density.x Ext_force_density.y Ext_force_density.z\n");
printf("%u %f %f %f\n",
d_LBparams.extForceFlag, d_LBparams.extForceDensity[0], d_LBparams.extForceDensity[1], d_LBparams.extForceDensity[2]);
printf("Agrid Tau Rho\n");
printf("%f %f %f\n", d_LBparams.agrid, d_LBparams.tau, d_LBparams.rho);
printf("gamma_shear gamma_bulk gamma_odd gamma_even\n");
printf("%f %f %f %f\n", d_LBparams.gammaShear, d_LBparams.gammaBulk, d_LBparams.gammaOdd, d_LBparams.gammaEven);
printf("c_sound_sq = %f\n", c_sound_sq);
}
__global__ void PrintDevVariable () {
printf ("The variable dimY equals to %u\n", d_LBparams.dimY);
}
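// Marks wall nodes: with at least two boundaries the y=0 and y=dimY-1 planes become
// boundaries 1 and 2; with four boundaries the z=0 and z=dimZ-1 planes become 3 and 4.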
__global__ void InitializeBoundaryMap (unsigned int *boundary_map) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
unsigned int xyz[3];
if (index < d_LBparams.numNodes) {
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
if (d_LBparams.numBoundaries >= 2) {
if (y==0) {
boundary_map[index] = 1;
} else if (y==(d_LBparams.dimY-1)) {
boundary_map[index] = 2;
}
}
if (d_LBparams.numBoundaries == 4) {
if (z==0) {
boundary_map[index] = 3;
} else if (z==(d_LBparams.dimZ-1)) {
boundary_map[index] = 4;
}
}
}
}
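// Fills the per-node body-force array with the constant external force density.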
__global__ void InitializeBodyForces (float *ext_forces) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
//ext_forces[0*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[0];
//ext_forces[1*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
//ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
    ext_forces[                           index] = d_LBparams.extForceDensity[0];
ext_forces[ d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
}
}
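// Sets every node's populations to the equilibrium distribution for a uniform density and
// zero velocity, then fills d_v with the corresponding density and velocity.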
__global__ void calc_n_from_rho_j_pi (float *ext_forces, float *n_a, LB_rho_v *d_v) {
  /* TODO: this can handle only a uniform density; something similar, but local,
     has to be called every time the fields are set by the user! */
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes)
{
float mode[19];
float Rho = d_LBparams.rho;
float v[3] = {0.0f, 0.0f, 0.0f};
float pi[6] = {Rho*c_sound_sq, 0.0f, Rho*c_sound_sq, 0.0f, 0.0f, Rho*c_sound_sq};
float rhoc_sq = Rho * c_sound_sq;
float avg_rho = d_LBparams.rho;
float local_rho, local_j[3], *local_pi, trace;
local_rho = Rho;
local_j[0] = Rho * v[0];
local_j[1] = Rho * v[1];
local_j[2] = Rho * v[2];
local_pi = pi;
/** reduce the pressure tensor to the part needed here.
       NOTE: this is not true anymore for SHANCHEN
if the densities are not uniform. FIXME*/
local_pi[0] -= rhoc_sq;
local_pi[2] -= rhoc_sq;
local_pi[5] -= rhoc_sq;
trace = local_pi[0] + local_pi[2] + local_pi[5];
float rho_times_coeff;
float tmp1,tmp2;
/** update the q=0 sublattice */
n_a[0 * d_LBparams.numNodes + index] = 1.0f/3.0f * (local_rho-avg_rho) - 1.0f/2.0f*trace;
/** update the q=1 sublattice */
rho_times_coeff = 1.0f/18.0f * (local_rho-avg_rho);
n_a[1 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[0] + 1.0f/4.0f*local_pi[0] - 1.0f/12.0f*trace;
n_a[2 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[0] + 1.0f/4.0f*local_pi[0] - 1.0f/12.0f*trace;
n_a[3 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[1] + 1.0f/4.0f*local_pi[2] - 1.0f/12.0f*trace;
n_a[4 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[1] + 1.0f/4.0f*local_pi[2] - 1.0f/12.0f*trace;
n_a[5 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[2] + 1.0f/4.0f*local_pi[5] - 1.0f/12.0f*trace;
n_a[6 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[2] + 1.0f/4.0f*local_pi[5] - 1.0f/12.0f*trace;
/** update the q=2 sublattice */
rho_times_coeff = 1.0f/36.0f * (local_rho-avg_rho);
tmp1 = local_pi[0] + local_pi[2];
tmp2 = 2.0f*local_pi[1];
n_a[ 7 * d_LBparams.numNodes+index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]+local_j[1]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[ 8 * d_LBparams.numNodes+index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]+local_j[1]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[ 9 * d_LBparams.numNodes+index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]-local_j[1]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[10 * d_LBparams.numNodes+index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]-local_j[1]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
tmp1 = local_pi[0] + local_pi[5];
tmp2 = 2.0f*local_pi[3];
n_a[11 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[12 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[13 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[14 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
tmp1 = local_pi[2] + local_pi[5];
tmp2 = 2.0f*local_pi[4];
n_a[15 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[1]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[16 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[1]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[17 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[1]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[18 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[1]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
/**set different seed for randomgen on every node */
// n_a.seed[index] = para.your_seed + index;
calc_m_from_n (index, n_a, mode);
update_rho_v (index, mode, ext_forces, d_v);
}
}
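// One LB time step for each node: compute modes, collide (relax + apply forces),
// normalize, and push-stream the populations into n_b.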
__global__ void integrate (float *n_a, float *ext_forces, LB_rho_v *d_v, float *n_b) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
float mode[19]; // the 19 moments (modes) are only temporary register values
if (index < d_LBparams.numNodes) {
calc_m_from_n (index, n_a, mode);
relax_modes (index, ext_forces, d_v, mode);
apply_forces (index, ext_forces, d_v, mode);
normalize_modes (mode);
calc_n_from_modes_push (index, mode, n_b);
}
}
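// Applies the bounce-back boundary conditions to all nodes (a no-op for fluid nodes).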
__global__ void apply_boundaries (unsigned int *boundary_map, float *boundary_velocities, float *n_curr, float *boundary_forces) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
bounce_back_boundaries (index, boundary_map, boundary_velocities, n_curr, boundary_forces);
}
}
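// Computes density, velocity, and pressure tensor for every node and stores them in d_p_v.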
__global__ void get_mesoscopic_values_in_LB_units (float *n_a, unsigned int *boundary_map, float *ext_forces, LB_rho_v *d_v, LB_rho_v_pi *d_p_v) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
float mode[19];
calc_m_from_n (index, n_a, mode);
calc_values_in_LB_units (index, index, boundary_map, mode, ext_forces, d_v, d_p_v);
}
}
extern "C"
void Initialize_LB_Parameters_dev (LBparameters *h_LBparams) {
cuda_safe_mem(hipMemcpyToSymbol(d_LBparams, h_LBparams, sizeof(LBparameters)));
}
extern "C"
void PrintDevParasWrapper () {
hipLaunchKernelGGL(( Print_dev_LB_Params), dim3(1),dim3(1), 0, 0, );
hipLaunchKernelGGL(( PrintDevVariable) , dim3(1),dim3(1), 0, 0, );
}
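// Allocates and zeroes the device buffers, copies the parameters to constant memory,
// builds the boundary map, and initializes body forces and equilibrium populations.
// A minimal driver sketch (assuming the caller has already filled in h_LBparams;
// numSteps and the output path below are placeholders):
//   InitializeLB ();
//   for (int step = 0; step < numSteps; ++step) UpdateLBE ();
//   PrintFluidVelocitiesVTK ("./fluid_velocity.vtk");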
extern "C"
void InitializeLB () {
#define free_realloc_and_clear(var, size) \
{ \
if ((var) != NULL) hipFree((var)); \
cuda_safe_mem(hipMalloc((void**)&var, size)); \
hipMemset(var, 0, size); \
}
cuda_safe_mem(hipMemcpyToSymbol(d_LBparams, &h_LBparams, sizeof(LBparameters)));
free_realloc_and_clear(devNodesA, h_LBparams.numNodes * 19 * sizeof(float));
free_realloc_and_clear(devNodesB, h_LBparams.numNodes * 19 * sizeof(float));
free_realloc_and_clear(devBoundaryMap, h_LBparams.numNodes * sizeof(unsigned int));
free_realloc_and_clear(devExtForces, h_LBparams.numNodes * 3 * sizeof(float));
free_realloc_and_clear(devBoundaryForces, h_LBparams.numBoundaries*3*sizeof(float));
free_realloc_and_clear(devBoundaryVelocities, h_LBparams.numBoundaries*3*sizeof(float));
size_of_rho_v = h_LBparams.numNodes * sizeof(LB_rho_v);
size_of_rho_v_pi = h_LBparams.numNodes * sizeof(LB_rho_v_pi);
free_realloc_and_clear(devRhoV, size_of_rho_v);
  /* TODO: this is almost a copy of device_rho_v; think about eliminating it, and maybe pi can be added to device_rho_v in this case */
free_realloc_and_clear(print_rho_v_pi, size_of_rho_v_pi);
// Note: discarded design
///**check flag if lb gpu init works*/
//free_and_realloc(gpu_check, sizeof(int));
//if (h_gpu_check != NULL)
// free (h_gpu_check);
//h_gpu_check = (int*)malloc(sizeof(int));
//h_gpu_check[0] = 0;
//hostExtForces = (float *)calloc(h_LBparams.numNodes*3, sizeof(float)); // memory needs to be released at the end !
// values for the kernel call
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3(blocks_per_grid_x, blocks_per_grid_y, 1);
KERNELCALL(InitializeBoundaryMap, dim_grid, threads_per_block, (devBoundaryMap));
//cuda_safe_mem(hipMemcpy(devBoundaryVelocities, hostBoundaryVelocities, 3*h_LBparams.numBoundaries*sizeof(float), hipMemcpyHostToDevice));
KERNELCALL(InitializeBodyForces, dim_grid, threads_per_block, (devExtForces));
KERNELCALL(calc_n_from_rho_j_pi, dim_grid, threads_per_block, (devExtForces, devNodesA, devRhoV));
intflag = 1;
devCurrentNodes = devNodesA;
// Note: discarded design
//cuda_safe_mem(hipMemcpy (h_gpu_check, gpu_check, sizeof(int), hipMemcpyDeviceToHost));
//fprintf(stderr, "initialization of lb gpu code %i\n", h_LBparams.numNodes);
hipDeviceSynchronize();
// Note: discarded design
//#if __CUDA_ARCH__ >= 200
//if(!h_gpu_check[0])
//{
// fprintf(stderr, "initialization of lb gpu code failed! \n");
// errexit();
//}
//#endif
}
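// One LB update: integrate with ping-pong between node arrays A and B, then apply the
// bounce-back boundaries if any are present.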
extern "C"
void UpdateLBE () {
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1);
// call of fluid step
/* NOTE: if pi is needed at every integration step, one should call an extended version
of the integrate kernel, or pass also devRhoV_pi and make sure that either
it or devRhoV are NULL depending on extended_values_flag */
if (intflag == 1) {
KERNELCALL(integrate, dim_grid, threads_per_block, (devNodesA, devExtForces, devRhoV, devNodesB));
devCurrentNodes = devNodesB;
intflag = 0;
} else {
KERNELCALL(integrate, dim_grid, threads_per_block, (devNodesB, devExtForces, devRhoV, devNodesA));
devCurrentNodes = devNodesA;
intflag = 1;
}
if (h_LBparams.numBoundaries > 0) {
// Version 1: can assign a velocity value to each boundary
//SetBoundaryVelocities ();
//KERNELCALL(apply_boundaries, dim_grid, threads_per_block, (devBoundaryMap, devBoundaryVelocities, devCurrentNodes, devBoundaryForces));
// Version 2: only allow walls in the y direction to move
KERNELCALL(apply_boundaries, dim_grid, threads_per_block, (devBoundaryMap, devBoundaryVelocities, devCurrentNodes, devBoundaryForces));
}
}
extern "C"
void SetBoundaryVelocities () {
float *hostBoundaryVelocities;
hostBoundaryVelocities = (float *) calloc (h_LBparams.numBoundaries*3, sizeof(float));
hostBoundaryVelocities[0*3+0] = -0.5*h_LBparams.boundaryVelocity[0];
hostBoundaryVelocities[0*3+1] = 0.0;
hostBoundaryVelocities[0*3+2] = 0.0;
hostBoundaryVelocities[1*3+0] = 0.5*h_LBparams.boundaryVelocity[0];
hostBoundaryVelocities[1*3+1] = 0.0;
hostBoundaryVelocities[1*3+2] = 0.0;
cuda_safe_mem(hipMemcpy(devBoundaryVelocities, hostBoundaryVelocities, 3*h_LBparams.numBoundaries*sizeof(float), hipMemcpyHostToDevice));
free (hostBoundaryVelocities);
}
extern "C"
void lb_get_values_GPU (LB_rho_v_pi *host_values) {
// values for the kernel call
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3(blocks_per_grid_x, blocks_per_grid_y, 1);
KERNELCALL(get_mesoscopic_values_in_LB_units, dim_grid, threads_per_block, (devCurrentNodes, devBoundaryMap, devExtForces, devRhoV, print_rho_v_pi));
cuda_safe_mem(hipMemcpy(host_values, print_rho_v_pi, size_of_rho_v_pi, hipMemcpyDeviceToHost));
}
extern "C"
void PrintFluidVelocitiesVTK (char *filePath) {
LB_rho_v_pi *host_values;
size_t size_of_values = h_LBparams.numNodes * sizeof(LB_rho_v_pi);
host_values = (LB_rho_v_pi*) malloc (size_of_values);
lb_get_values_GPU (host_values);
FILE* fp = fopen (filePath, "w");
fprintf (fp, "# vtk DataFile Version 2.0\nlbfluid_gpu\n"
"ASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS %u %u %u\n"
"ORIGIN %f %f %f\nSPACING %f %f %f\nPOINT_DATA %u\n"
"SCALARS velocity float 3\nLOOKUP_TABLE default\n",
h_LBparams.dimX, h_LBparams.dimY, h_LBparams.dimZ,
h_LBparams.agrid*0.5, h_LBparams.agrid*0.5, h_LBparams.agrid*0.5,
h_LBparams.agrid, h_LBparams.agrid, h_LBparams.agrid,
h_LBparams.numNodes);
for(int j=0; j < int(h_LBparams.numNodes); ++j) {
fprintf (fp, "%f %f %f\n", host_values[j].v[0], host_values[j].v[1], host_values[j].v[2]);
}
fclose (fp);
free (host_values);
}
| fc789696debe488ac46f19ec45a8b0a4fa55cb9d.cu | #include "cuda.h"
extern "C" {
#include <stdio.h>
#include "lb.h"
}
#include "cuda_utils.hpp"
//static const float c_sound_sq = 1.0f/3.0f;
static __constant__ float c_sound_sq = 1.0f/3.0f;
static size_t size_of_rho_v;
static size_t size_of_rho_v_pi;
static float *devNodesA = NULL;
static float *devNodesB = NULL;
static float *devBoundaryForces = NULL;
static LB_rho_v *devRhoV = NULL;
static LB_rho_v_pi *devRhoVpi = NULL;
static LB_rho_v_pi *print_rho_v_pi = NULL;
static unsigned int intflag = 1;
unsigned int *devBoundaryMap = NULL;
float *devExtForces = NULL;
float *devBoundaryVelocities = NULL;
float *devCurrentNodes = NULL;
float *hostExtForces = NULL;
//static __device__ __constant__ LBparameters d_LBparams;
__constant__ LBparameters d_LBparams;
LBparameters h_LBparams = {
// agrid tau rho
1.0f, 1.0f, 1.0f,
// gammaShear gammaBulk gammaOdd gammaEven
1.0f, 1.0f, 0.0f, 0.0f,
// dimX dimY dimZ numNodes
0u, 0u, 0u, 0u,
// numBoundaries boundaryVelocity
0u, {0.0f, 0.0f, 0.0f},
// extForceFlag extForceDensity
0u, {0.0f, 0.0f, 0.0f}
};
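// Decomposes a flat node index into lattice coordinates (x fastest, then y, then z).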
__device__ void index_to_xyz (unsigned int index, unsigned int *xyz) {
xyz[0] = index % d_LBparams.dimX;
index /= d_LBparams.dimX;
xyz[1] = index % d_LBparams.dimY;
index /= d_LBparams.dimY;
xyz[2] = index;
}
__device__ void calc_m_from_n (unsigned int index, float *n_a, float *mode) {
// The following convention is used:
  // The $\hat{c}_i$ from B. Duenweg's paper are given by:
/* c_0 = { 0, 0, 0}
c_1 = { 1, 0, 0}
c_2 = {-1, 0, 0}
c_3 = { 0, 1, 0}
c_4 = { 0,-1, 0}
c_5 = { 0, 0, 1}
c_6 = { 0, 0,-1}
c_7 = { 1, 1, 0}
c_8 = {-1,-1, 0}
c_9 = { 1,-1, 0}
c_10 = {-1, 1, 0}
c_11 = { 1, 0, 1}
c_12 = {-1, 0,-1}
c_13 = { 1, 0,-1}
c_14 = {-1, 0, 1}
c_15 = { 0, 1, 1}
c_16 = { 0,-1,-1}
c_17 = { 0, 1,-1}
c_18 = { 0,-1, 1} */
// The basis vectors (modes) are constructed as follows
// $m_k = \sum_{i} e_{ki} n_{i}$, where the $e_{ki}$ form a
// linear transformation (matrix) that is given by
/* $e{ 0,i} = 1$
$e{ 1,i} = c_{i,x}$
$e{ 2,i} = c_{i,y}$
$e{ 3,i} = c_{i,z}$
$e{ 4,i} = c_{i}^2 - 1$
$e{ 5,i} = c_{i,x}^2 - c_{i,y}^2$
$e{ 6,i} = c_{i}^2 - 3*c_{i,z}^2$
$e{ 7,i} = c_{i,x}*c_{i,y}$
$e{ 8,i} = c_{i,x}*c_{i,z}$
$e{ 9,i} = c_{i,y}*c_{i,z}$
$e{10,i} = (3*c_{i}^2 - 5)*c_{i,x}$
$e{11,i} = (3*c_{i}^2 - 5)*c_{i,y}$
$e{12,i} = (3*c_{i}^2 - 5)*c_{i,z}$
$e{13,i} = (c_{i,y}^2 - c_{i,z}^2)*c_{i,x}$
$e{14,i} = (c_{i,x}^2 - c_{i,z}^2)*c_{i,y}$
$e{15,i} = (c_{i,x}^2 - c_{i,y}^2)*c_{i,z}$
$e{16,i} = 3*c_{i}^2^2 - 6*c_{i}^2 + 1$
$e{17,i} = (2*c_{i}^2 - 3)*(c_{i,x}^2 - c_{i,y}^2)$
$e{18,i} = (2*c_{i}^2 - 3)*(c_{i}^2 - 3*c_{i,z}^2)$ */
// Such that the transformation matrix is given by
/* {{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0, 1,-1, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1, 0, 0, 0, 0},
{ 0, 0, 0, 1,-1, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0, 1,-1, 1,-1},
{ 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1},
{-1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0, 1, 1,-1,-1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1},
{ 0, 1, 1, 1, 1,-2,-2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1},
{ 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1},
{ 0,-2, 2, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1, 0, 0, 0, 0},
{ 0, 0, 0,-2, 2, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0, 1,-1, 1,-1},
{ 0, 0, 0, 0, 0,-2, 2, 0, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1},
{ 0, 0, 0, 0, 0, 0, 0, 1,-1, 1,-1,-1, 1,-1, 1, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1, 0, 0, 0, 0,-1, 1,-1, 1},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1,-1, 1, 1,-1},
{ 1,-2,-2,-2,-2,-2,-2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 0,-1,-1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1},
{ 0,-1,-1,-1,-1, 2, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1}} */
// With weights
/* q^{c_{i}} = { 1/3, 1/18, 1/18, 1/18,
1/18, 1/18, 1/18, 1/36,
1/36, 1/36, 1/36, 1/36,
1/36, 1/36, 1/36, 1/36,
1/36, 1/36, 1/36 } */
// Which makes the transformation satisfy the following
// orthogonality condition:
// \sum_{i} q^{c_{i}} e_{ki} e_{li} = w_{k} \delta_{kl},
// where the weights are:
/* w_{i} = { 1, 1/3, 1/3, 1/3,
2/3, 4/9, 4/3, 1/9,
1/9, 1/9, 2/3, 2/3,
2/3, 2/9, 2/9, 2/9,
2, 4/9, 4/3 } */
// mass mode
mode[0] = n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index]
+ n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index]
+ n_a[ 5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index];
// momentum modes
mode[1] = (n_a[ 1 * d_LBparams.numNodes + index] - n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[2] = (n_a[ 3 * d_LBparams.numNodes + index] - n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[3] = (n_a[ 5 * d_LBparams.numNodes + index] - n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
// stress modes
mode[4] = - n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index];
mode[5] = (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
- (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
mode[6] = (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index])
- 2.0f*( (n_a[5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index])
- (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]));
mode[7] = (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]);
mode[8] = (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]);
mode[9] = (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
// kinetic modes
mode[10] = - 2.0f*(n_a[ 1 * d_LBparams.numNodes + index] - n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[11] = - 2.0f*(n_a[ 3 * d_LBparams.numNodes + index] - n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[12] = - 2.0f*(n_a[ 5 * d_LBparams.numNodes + index] - n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
+ (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[13] = (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index]);
mode[14] = (n_a[ 7 * d_LBparams.numNodes + index] - n_a[ 8 * d_LBparams.numNodes + index])
- (n_a[ 9 * d_LBparams.numNodes + index] - n_a[10 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[15] = (n_a[11 * d_LBparams.numNodes + index] - n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] - n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] - n_a[16 * d_LBparams.numNodes + index])
+ (n_a[17 * d_LBparams.numNodes + index] - n_a[18 * d_LBparams.numNodes + index]);
mode[16] = n_a[ 0 * d_LBparams.numNodes + index]
+ n_a[ 7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index]
+ n_a[ 9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]
+ n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index]
+ n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index]
+ n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index]
+ n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]
- 2.0f*( (n_a[1 * d_LBparams.numNodes + index] + n_a[2 * d_LBparams.numNodes + index])
+ (n_a[3 * d_LBparams.numNodes + index] + n_a[4 * d_LBparams.numNodes + index])
+ (n_a[5 * d_LBparams.numNodes + index] + n_a[6 * d_LBparams.numNodes + index]));
mode[17] = - (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
+ (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
+ (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
+ (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index]);
mode[18] = - (n_a[ 1 * d_LBparams.numNodes + index] + n_a[ 2 * d_LBparams.numNodes + index])
- (n_a[ 3 * d_LBparams.numNodes + index] + n_a[ 4 * d_LBparams.numNodes + index])
- (n_a[11 * d_LBparams.numNodes + index] + n_a[12 * d_LBparams.numNodes + index])
- (n_a[13 * d_LBparams.numNodes + index] + n_a[14 * d_LBparams.numNodes + index])
- (n_a[15 * d_LBparams.numNodes + index] + n_a[16 * d_LBparams.numNodes + index])
- (n_a[17 * d_LBparams.numNodes + index] + n_a[18 * d_LBparams.numNodes + index])
+ 2.0f*( (n_a[5 * d_LBparams.numNodes + index] + n_a[ 6 * d_LBparams.numNodes + index])
+ (n_a[7 * d_LBparams.numNodes + index] + n_a[ 8 * d_LBparams.numNodes + index])
+ (n_a[9 * d_LBparams.numNodes + index] + n_a[10 * d_LBparams.numNodes + index]));
}
__device__ void update_rho_v (unsigned int index, float *mode, float *ext_forces, LB_rho_v *d_v) {
float Rho_tot = 0.0f;
float u_tot[3] = {0.0f,0.0f,0.0f};
// Note:
// Remember that the populations are stored as differences to their equilibrium values.
// Quantities are calculated in LB units rather than MD units (cf. ESPResSo)
//d_v[index].rho[ii] = mode[0 +ii*LBQ] + para.rho[ii]*para.agrid*para.agrid*para.agrid;
//Rho_tot += mode[0+ii*LBQ] + para.rho[ii]*para.agrid*para.agrid*para.agrid;
d_v[index].rho = mode[0] + d_LBparams.rho;
Rho_tot += mode[0] + d_LBparams.rho;
u_tot[0] += mode[1];
u_tot[1] += mode[2];
u_tot[2] += mode[3];
/** if forces are present, the momentum density is redefined to
 * include one half-step of the force action. See the
* Chapman-Enskog expansion in [Ladd & Verberg]. */
u_tot[0] += 0.5f*ext_forces[0*d_LBparams.numNodes + index];
u_tot[1] += 0.5f*ext_forces[1*d_LBparams.numNodes + index];
u_tot[2] += 0.5f*ext_forces[2*d_LBparams.numNodes + index];
u_tot[0] /= Rho_tot;
u_tot[1] /= Rho_tot;
u_tot[2] /= Rho_tot;
d_v[index].v[0] = u_tot[0];
d_v[index].v[1] = u_tot[1];
d_v[index].v[2] = u_tot[2];
}
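// Illustrative usage sketch (added; it mirrors what the kernels below already do):
// for a single fluid node one first computes the modes and then the hydrodynamic
// fields, e.g.
//   float mode[19];
//   calc_m_from_n (index, n_a, mode);
//   update_rho_v (index, mode, ext_forces, d_v);
// after which d_v[index].rho and d_v[index].v[] hold the density and the
// half-force-corrected velocity in LB units.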
__device__ void relax_modes (unsigned int index, float *ext_forces, LB_rho_v *d_v, float *mode) {
float Rho;
float j[3];
float modes_from_pi_eq[6];
float u_tot[3] = {0.0f,0.0f,0.0f};
update_rho_v (index, mode, ext_forces, d_v);
Rho = mode[0] + d_LBparams.rho;
float inv_Rho = 1.0 / Rho;
u_tot[0] = d_v[index].v[0];
u_tot[1] = d_v[index].v[1];
u_tot[2] = d_v[index].v[2];
j[0] = Rho * u_tot[0];
j[1] = Rho * u_tot[1];
j[2] = Rho * u_tot[2];
/** equilibrium part of the stress modes (eq13 schiller)*/
modes_from_pi_eq[0] = ((j[0]*j[0])+(j[1]*j[1])+(j[2]*j[2])) * inv_Rho;
modes_from_pi_eq[1] = ((j[0]*j[0])-(j[1]*j[1])) * inv_Rho;
modes_from_pi_eq[2] = (((j[0]*j[0])+(j[1]*j[1])+(j[2]*j[2])) - 3.0f*(j[2]*j[2])) * inv_Rho;
modes_from_pi_eq[3] = j[0]*j[1] * inv_Rho;
modes_from_pi_eq[4] = j[0]*j[2] * inv_Rho;
modes_from_pi_eq[5] = j[1]*j[2] * inv_Rho;
/** relax the stress modes (eq14 schiller)*/
mode[4] = modes_from_pi_eq[0] + d_LBparams.gammaBulk * (mode[4] - modes_from_pi_eq[0]);
mode[5] = modes_from_pi_eq[1] + d_LBparams.gammaShear * (mode[5] - modes_from_pi_eq[1]);
mode[6] = modes_from_pi_eq[2] + d_LBparams.gammaShear * (mode[6] - modes_from_pi_eq[2]);
mode[7] = modes_from_pi_eq[3] + d_LBparams.gammaShear * (mode[7] - modes_from_pi_eq[3]);
mode[8] = modes_from_pi_eq[4] + d_LBparams.gammaShear * (mode[8] - modes_from_pi_eq[4]);
mode[9] = modes_from_pi_eq[5] + d_LBparams.gammaShear * (mode[9] - modes_from_pi_eq[5]);
/** relax the ghost modes (project them out) */
/** ghost modes have no equilibrium part due to orthogonality */
mode[10] = d_LBparams.gammaOdd*mode[10];
mode[11] = d_LBparams.gammaOdd*mode[11];
mode[12] = d_LBparams.gammaOdd*mode[12];
mode[13] = d_LBparams.gammaOdd*mode[13];
mode[14] = d_LBparams.gammaOdd*mode[14];
mode[15] = d_LBparams.gammaOdd*mode[15];
mode[16] = d_LBparams.gammaEven*mode[16];
mode[17] = d_LBparams.gammaEven*mode[17];
mode[18] = d_LBparams.gammaEven*mode[18];
}
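// Note (added, assumption): gammaShear and gammaBulk are usually derived on the
// host from the kinematic viscosities; a common choice, following the standard
// D3Q19 relaxation relations used by ESPResSo-like codes, is
//   gammaShear = 1.0f - 2.0f / (6.0f * nu * tau / (agrid * agrid) + 1.0f);
//   gammaBulk  = 1.0f - 2.0f / (9.0f * nu_bulk * tau / (agrid * agrid) + 1.0f);
// Check these against your own parameter setup before relying on them.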
__device__ void reset_LB_forces (unsigned int index, float *ext_forces) {
ext_forces[ index] = d_LBparams.extForceDensity[0];
ext_forces[ d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
}
__device__ void apply_forces (unsigned int index, float *ext_forces, LB_rho_v *d_v, float *mode) {
float u[3] = {0.0f, 0.0f, 0.0f},
C[6] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
 // Note: the values in d_v were calculated in relax_modes()
u[0] = d_v[index].v[0];
u[1] = d_v[index].v[1];
u[2] = d_v[index].v[2];
C[0] += (1.0f + d_LBparams.gammaBulk) * u[0]*ext_forces[0*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[2] += (1.0f + d_LBparams.gammaBulk) * u[1]*ext_forces[1*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[5] += (1.0f + d_LBparams.gammaBulk) * u[2]*ext_forces[2*d_LBparams.numNodes + index] +
1.0f/3.0f * (d_LBparams.gammaBulk-d_LBparams.gammaShear) * (u[0]*ext_forces[0*d_LBparams.numNodes + index] +
u[1]*ext_forces[1*d_LBparams.numNodes + index] + u[2]*ext_forces[2*d_LBparams.numNodes + index]);
C[1] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[0]*ext_forces[1*d_LBparams.numNodes + index] +
u[1]*ext_forces[0*d_LBparams.numNodes + index]);
C[3] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[0]*ext_forces[2*d_LBparams.numNodes + index] +
u[2]*ext_forces[0*d_LBparams.numNodes + index]);
C[4] += 1.0f/2.0f * (1.0f+d_LBparams.gammaShear) * (u[1]*ext_forces[2*d_LBparams.numNodes + index] +
u[2]*ext_forces[1*d_LBparams.numNodes + index]);
/** update momentum modes */
mode[1] += ext_forces[0*d_LBparams.numNodes + index];
mode[2] += ext_forces[1*d_LBparams.numNodes + index];
mode[3] += ext_forces[2*d_LBparams.numNodes + index];
/** update stress modes */
mode[4] += C[0] + C[2] + C[5];
mode[5] += C[0] - C[2];
mode[6] += C[0] + C[2] - 2.0f*C[5];
mode[7] += C[1];
mode[8] += C[3];
mode[9] += C[4];
// Note: Body forces are reset in coupling.cu
//reset_LB_forces (index, ext_forces);
}
__device__ void normalize_modes (float* mode) {
/** normalization factors enter in the back transformation */
mode[ 0] *= 1.0f;
mode[ 1] *= 3.0f;
mode[ 2] *= 3.0f;
mode[ 3] *= 3.0f;
mode[ 4] *= 3.0f/2.0f;
mode[ 5] *= 9.0f/4.0f;
mode[ 6] *= 3.0f/4.0f;
mode[ 7] *= 9.0f;
mode[ 8] *= 9.0f;
mode[ 9] *= 9.0f;
mode[10] *= 3.0f/2.0f;
mode[11] *= 3.0f/2.0f;
mode[12] *= 3.0f/2.0f;
mode[13] *= 9.0f/2.0f;
mode[14] *= 9.0f/2.0f;
mode[15] *= 9.0f/2.0f;
mode[16] *= 1.0f/2.0f;
mode[17] *= 9.0f/4.0f;
mode[18] *= 3.0f/4.0f;
}
__device__ void calc_n_from_modes_push (unsigned int index, float *mode, float *n_b) {
unsigned int xyz[3];
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
n_b[0*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/3.0f * (mode[0] - mode[4] + mode[16]);
n_b[1*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] + mode[1] + mode[5] + mode[6] - mode[17] - mode[18] -2.0f*(mode[10] + mode[16]));
n_b[2*d_LBparams.numNodes + (d_LBparams.dimX + x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] - mode[1] + mode[5] + mode[6] - mode[17] - mode[18] + 2.0f*(mode[10] - mode[16]));
n_b[3*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] + mode[2] - mode[5] + mode[6] + mode[17] - mode[18]- 2.0f*(mode[11] + mode[16]));
n_b[4*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/18.0f * (mode[0] - mode[2] - mode[5] + mode[6] + mode[17] - mode[18] + 2.0f*(mode[11] - mode[16]));
n_b[5*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/18.0f * (mode[0] + mode[3] - 2.0f*(mode[6] + mode[12] + mode[16] - mode[18]));
n_b[6*d_LBparams.numNodes + x + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/18.0f * (mode[0] - mode[3] - 2.0f*(mode[6] - mode[12] + mode[16] - mode[18]));
n_b[7*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] + mode[1] + mode[2] + mode[4] + 2.0f*mode[6] + mode[7] + mode[10] + mode[11] + mode[13] + mode[14] + mode[16] + 2.0f*mode[18]);
n_b[8*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] - mode[1] - mode[2] + mode[4] + 2.0f*mode[6] + mode[7] - mode[10] - mode[11] - mode[13] - mode[14] + mode[16] +
2.0f*mode[18]);
n_b[9*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] + mode[1] - mode[2] + mode[4] + 2.0f*mode[6] - mode[7] + mode[10] - mode[11] + mode[13] - mode[14] + mode[16] + 2.0f*mode[18]);
n_b[10*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*z] =
1.0f/36.0f * (mode[0] - mode[1] + mode[2] + mode[4] + 2.0f*mode[6] - mode[7] - mode[10] + mode[11] - mode[13] + mode[14] + mode[16] + 2.0f*mode[18]);
n_b[11*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[1] + mode[3] + mode[4] + mode[5] - mode[6] + mode[8] + mode[10] + mode[12] - mode[13] + mode[15] + mode[16] + mode[17] - mode[18]);
n_b[12*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[1] - mode[3] + mode[4] + mode[5] - mode[6] + mode[8] - mode[10] - mode[12] + mode[13] - mode[15] + mode[16] + mode[17] - mode[18]);
n_b[13*d_LBparams.numNodes + (x+1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[1] - mode[3] + mode[4] + mode[5] - mode[6] - mode[8] + mode[10] - mode[12] - mode[13] - mode[15] + mode[16] + mode[17] - mode[18]);
n_b[14*d_LBparams.numNodes + (d_LBparams.dimX+x-1)%d_LBparams.dimX + d_LBparams.dimX*y + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[1] + mode[3] + mode[4] + mode[5] - mode[6] - mode[8] - mode[10] + mode[12] + mode[13] + mode[15] + mode[16] + mode[17] - mode[18]);
n_b[15*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[2] + mode[3] + mode[4] - mode[5] - mode[6] + mode[9] + mode[11] + mode[12] - mode[14] - mode[15] + mode[16] - mode[17] - mode[18]);
n_b[16*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[2] - mode[3] + mode[4] - mode[5] - mode[6] + mode[9] - mode[11] - mode[12] + mode[14] + mode[15] + mode[16] - mode[17] - mode[18]);
n_b[17*d_LBparams.numNodes + x + d_LBparams.dimX*((y+1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((d_LBparams.dimZ+z-1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] + mode[2]- mode[3] + mode[4] - mode[5] - mode[6] - mode[9] + mode[11] - mode[12] - mode[14] + mode[15] + mode[16] - mode[17] - mode[18]);
n_b[18*d_LBparams.numNodes + x + d_LBparams.dimX*((d_LBparams.dimY+y-1)%d_LBparams.dimY) + d_LBparams.dimX*d_LBparams.dimY*((z+1)%d_LBparams.dimZ)] =
1.0f/36.0f * (mode[0] - mode[2] + mode[3] + mode[4] - mode[5] - mode[6] - mode[9] - mode[11] + mode[12] + mode[14] - mode[15] + mode[16] - mode[17] - mode[18]);
}
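// Note (added): the back transformation above implements "push" streaming: each
// post-collision population of node (x,y,z) is written directly to its downstream
// neighbour using the periodic linear index
//   idx(x,y,z) = x + dimX*y + dimX*dimY*z,   with x -> (x + c_x + dimX) % dimX, etc.,
// e.g. population 1 (c = (1,0,0)) is stored at node ((x+1)%dimX, y, z) of the n_b buffer.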
// Note: suffix f
__device__ void bounce_back_boundaries (unsigned int index, unsigned int *boundary_map, float *boundary_velocities, float *n_curr, float *devBoundaryForces) {
unsigned int boundaryIndex;
float v[3];
unsigned int xyz[3];
float shift;
float weight;
int c[3];
float pop_to_bounce_back;
unsigned int population;
size_t to_index, to_index_x, to_index_y, to_index_z;
float boundary_force[3] = {0.0f,0.0f,0.0f};
unsigned int inverse;
boundaryIndex = boundary_map[index];
if (boundaryIndex != 0)
{
// Version 1: can assign a velocity value to each boundary
//v[0] = boundary_velocities[(boundaryIndex-1)*3 + 0];
//v[1] = boundary_velocities[(boundaryIndex-1)*3 + 1];
//v[2] = boundary_velocities[(boundaryIndex-1)*3 + 2];
// Version 2: only allow walls in the y direction to move
if (boundaryIndex == 1) {
v[0] = -0.5f * d_LBparams.boundaryVelocity[0];
v[1] = 0.0f;
v[2] = 0.0f;
} else if (boundaryIndex == 2) {
v[0] = 0.5f * d_LBparams.boundaryVelocity[0];
v[1] = 0.0f;
v[2] = 0.0f;
} else {
v[0] = 0.0f;
v[1] = 0.0f;
v[2] = 0.0f;
}
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
// TODO : PUT IN EQUILIBRIUM CONTRIBUTION TO THE BOUNCE-BACK DENSITY FOR THE BOUNDARY FORCE
// TODO : INITIALIZE BOUNDARY FORCE PROPERLY, HAS NONZERO ELEMENTS IN FIRST STEP
// TODO : SET INTERNAL BOUNDARY NODE VALUES TO ZERO
// Note:
 // I have followed ESPResSo 4.1.2 and modified some of the following code.
 // I am still not sure whether those prefactors, agrid and tau, should appear or not!
// The following macro just serves as text replacement !
#define BOUNCEBACK() \
shift = 2.0f * weight * d_LBparams.rho * (v[0]*c[0]+v[1]*c[1]+v[2]*c[2]) * 3.0f / d_LBparams.agrid * d_LBparams.tau; \
pop_to_bounce_back = n_curr[population*d_LBparams.numNodes + index]; \
to_index_x = (x+c[0]+d_LBparams.dimX) % d_LBparams.dimX; \
to_index_y = (y+c[1]+d_LBparams.dimY) % d_LBparams.dimY; \
to_index_z = (z+c[2]+d_LBparams.dimZ) % d_LBparams.dimZ; \
to_index = to_index_x + d_LBparams.dimX*to_index_y + d_LBparams.dimX*d_LBparams.dimY*to_index_z; \
if (boundary_map[to_index] == 0) { \
boundary_force[0] += (2.0f * pop_to_bounce_back + shift) * c[0]; \
boundary_force[1] += (2.0f * pop_to_bounce_back + shift) * c[1]; \
boundary_force[2] += (2.0f * pop_to_bounce_back + shift) * c[2]; \
n_curr[inverse*d_LBparams.numNodes + to_index] = pop_to_bounce_back + shift; \
}
// Note:
// to_index: destination node
// A minus sign is absorbed into the population velocity c, so the term pop_to_bounce_back + shift
// appears in the code above rather than pop_to_bounce_back - shift
// the resting population does nothing, i.e., population 0.
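 // Illustrative expansion (added) of the first invocation below: with c = (1,0,0),
 // population 2 and inverse 1, BOUNCEBACK() reduces to
 //   shift = 2*weight*rho*(v.c)*3/agrid*tau;
 //   if (boundary_map[+x neighbour] == 0)
 //       n_curr[1*numNodes + (+x neighbour)] = n_curr[2*numNodes + index] + shift;
 // i.e. the standard link bounce-back with a moving-wall correction.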
c[0]= 1;c[1]= 0;c[2]= 0; weight=1./18.; population= 2; inverse= 1;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]= 0; weight=1./18.; population= 1; inverse= 2;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]= 0; weight=1./18.; population= 4; inverse= 3;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]= 0; weight=1./18.; population= 3; inverse= 4;
BOUNCEBACK();
c[0]= 0;c[1]= 0;c[2]= 1; weight=1./18.; population= 6; inverse= 5;
BOUNCEBACK();
c[0]= 0;c[1]= 0;c[2]=-1; weight=1./18.; population= 5; inverse= 6;
BOUNCEBACK();
c[0]= 1;c[1]= 1;c[2]= 0; weight=1./36.; population= 8; inverse= 7;
BOUNCEBACK();
c[0]=-1;c[1]=-1;c[2]= 0; weight=1./36.; population= 7; inverse= 8;
BOUNCEBACK();
c[0]= 1;c[1]=-1;c[2]= 0; weight=1./36.; population=10; inverse= 9;
BOUNCEBACK();
c[0]=-1;c[1]= 1;c[2]= 0; weight=1./36.; population= 9; inverse=10;
BOUNCEBACK();
c[0]= 1;c[1]= 0;c[2]= 1; weight=1./36.; population=12; inverse=11;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]=-1; weight=1./36.; population=11; inverse=12;
BOUNCEBACK();
c[0]= 1;c[1]= 0;c[2]=-1; weight=1./36.; population=14; inverse=13;
BOUNCEBACK();
c[0]=-1;c[1]= 0;c[2]= 1; weight=1./36.; population=13; inverse=14;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]= 1; weight=1./36.; population=16; inverse=15;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]=-1; weight=1./36.; population=15; inverse=16;
BOUNCEBACK();
c[0]= 0;c[1]= 1;c[2]=-1; weight=1./36.; population=18; inverse=17;
BOUNCEBACK();
c[0]= 0;c[1]=-1;c[2]= 1; weight=1./36.; population=17; inverse=18;
BOUNCEBACK();
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 0], boundary_force[0]);
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 1], boundary_force[1]);
atomicAdd(&devBoundaryForces[(boundaryIndex-1)*3 + 2], boundary_force[2]);
}
}
__device__ void calc_values_in_LB_units (unsigned int index, unsigned int print_index, unsigned int *boundary_map, float *mode, float *ext_forces, LB_rho_v *d_v, LB_rho_v_pi *d_p_v) {
float j[3];
float modes_from_pi_eq[6];
float pi[6]={0.0f,0.0f,0.0f,0.0f,0.0f,0.0f};
if (boundary_map[index] == 0) {
/* Ensure we are working with the current values of d_v */
update_rho_v (index, mode, ext_forces, d_v);
d_p_v[print_index].rho = d_v[index].rho;
d_p_v[print_index].v[0] = d_v[index].v[0];
d_p_v[print_index].v[1] = d_v[index].v[1];
d_p_v[print_index].v[2] = d_v[index].v[2];
/* stress calculation */
float Rho = d_v[index].rho;
float inv_Rho = 1.0 / Rho;
/* note that d_v[index].v[] already includes the 1/2 f term, accounting for the pre- and post-collisional average */
j[0] = Rho * d_v[index].v[0];
j[1] = Rho * d_v[index].v[1];
j[2] = Rho * d_v[index].v[2];
// equilibrium part of the stress modes, which comes from
// the equality between modes and stress tensor components
/* m4 = trace(pi) - rho
m5 = pi_xx - pi_yy
m6 = trace(pi) - 3 pi_zz
m7 = pi_xy
m8 = pi_xz
m9 = pi_yz */
 // and plugging in the Euler stress for the equilibrium:
// pi_eq = rho_0*c_s^2*I3 + (j \otimes j)/rho
// with I3 the 3D identity matrix and
// rho = \trace(rho_0*c_s^2*I3), which yields
/* m4_from_pi_eq = j.j
m5_from_pi_eq = j_x*j_x - j_y*j_y
m6_from_pi_eq = j.j - 3*j_z*j_z
m7_from_pi_eq = j_x*j_y
m8_from_pi_eq = j_x*j_z
m9_from_pi_eq = j_y*j_z */
// where the / Rho term has been dropped. We thus obtain:
modes_from_pi_eq[0] = (j[0]*j[0] + j[1]*j[1] + j[2]*j[2] ) * inv_Rho;
modes_from_pi_eq[1] = (j[0]*j[0] - j[1]*j[1] ) * inv_Rho;
modes_from_pi_eq[2] = (j[0]*j[0] + j[1]*j[1] + j[2]*j[2] - 3.0*j[2]*j[2]) * inv_Rho;
modes_from_pi_eq[3] = j[0]*j[1] * inv_Rho;
modes_from_pi_eq[4] = j[0]*j[2] * inv_Rho;
modes_from_pi_eq[5] = j[1]*j[2] * inv_Rho;
/* Now we must predict the outcome of the next collision */
/* We immediately average pre- and post-collision. */
/* TODO: need a reference for this. */
mode[4] = modes_from_pi_eq[0] + (0.5 + 0.5*d_LBparams.gammaBulk) * (mode[4] - modes_from_pi_eq[0]);
mode[5] = modes_from_pi_eq[1] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[5] - modes_from_pi_eq[1]);
mode[6] = modes_from_pi_eq[2] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[6] - modes_from_pi_eq[2]);
mode[7] = modes_from_pi_eq[3] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[7] - modes_from_pi_eq[3]);
mode[8] = modes_from_pi_eq[4] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[8] - modes_from_pi_eq[4]);
mode[9] = modes_from_pi_eq[5] + (0.5 + 0.5*d_LBparams.gammaShear) * (mode[9] - modes_from_pi_eq[5]);
// Transform the stress tensor components according to the modes that
// correspond to those used by U. Schiller. In terms of populations this
// expression then corresponds exactly to those in Eqs. 116 - 121 in the
// Duenweg and Ladd paper, when these are written out in populations.
// But to ensure this, the expression in Schiller's modes has to be different!
pi[0] += (2.0f*(mode[0] + mode[4]) + mode[6] + 3.0f*mode[5]) / 6.0f; // xx
pi[1] += mode[7]; // xy
pi[2] += (2.0f*(mode[0] + mode[4]) + mode[6] - 3.0f*mode[5]) / 6.0f; // yy
pi[3] += mode[8]; // xz
pi[4] += mode[9]; // yz
pi[5] += (mode[0] + mode[4] - mode[6]) / 3.0f; // zz
for(int i=0; i < 6; i++) {
d_p_v[print_index].pi[i] = pi[i];
}
} else {
d_p_v[print_index].rho = 0.0f;
for (int i=0; i < 3; i++)
d_p_v[print_index].v[i] = 0.0f;
for(int i=0; i < 6; i++)
d_p_v[print_index].pi[i] = 0.0f;
}
}
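// Note (added, assumption): the fields written to d_p_v above are in LB units; a
// possible conversion to physical/MD units, following the usual LB unit mapping, is
//   rho_phys = rho_LB / (agrid*agrid*agrid);
//   v_phys   = v_LB * agrid / tau;
//   pi_phys  = pi_LB / (agrid * tau * tau);
// Verify these factors against your own definition of agrid and tau before use.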
__global__ void Print_dev_LB_Params() {
printf("\nDim_x Dim_y Dim_z Num_of_nodes\n");
printf("%u %u %u %u\n",d_LBparams.dimX, d_LBparams.dimY, d_LBparams.dimZ, d_LBparams.numNodes);
printf("Num_of_boundaries Boundary_vel.x Boundary_vel.y Boundary_vel.z\n");
printf("%u %f %f %f\n",
d_LBparams.numBoundaries, d_LBparams.boundaryVelocity[0], d_LBparams.boundaryVelocity[1], d_LBparams.boundaryVelocity[2]);
printf("Ext_force_flag Ext_force_density.x Ext_force_density.y Ext_force_density.z\n");
printf("%u %f %f %f\n",
d_LBparams.extForceFlag, d_LBparams.extForceDensity[0], d_LBparams.extForceDensity[1], d_LBparams.extForceDensity[2]);
printf("Agrid Tau Rho\n");
printf("%f %f %f\n", d_LBparams.agrid, d_LBparams.tau, d_LBparams.rho);
printf("gamma_shear gamma_bulk gamma_odd gamma_even\n");
printf("%f %f %f %f\n", d_LBparams.gammaShear, d_LBparams.gammaBulk, d_LBparams.gammaOdd, d_LBparams.gammaEven);
printf("c_sound_sq = %f\n", c_sound_sq);
}
__global__ void PrintDevVariable () {
printf ("The variable dimY equals to %u\n", d_LBparams.dimY);
}
__global__ void InitializeBoundaryMap (unsigned int *boundary_map) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
unsigned int xyz[3];
if (index < d_LBparams.numNodes) {
index_to_xyz (index, xyz);
unsigned int x = xyz[0];
unsigned int y = xyz[1];
unsigned int z = xyz[2];
if (d_LBparams.numBoundaries >= 2) {
if (y==0) {
boundary_map[index] = 1;
} else if (y==(d_LBparams.dimY-1)) {
boundary_map[index] = 2;
}
}
if (d_LBparams.numBoundaries == 4) {
if (z==0) {
boundary_map[index] = 3;
} else if (z==(d_LBparams.dimZ-1)) {
boundary_map[index] = 4;
}
}
}
}
__global__ void InitializeBodyForces (float *ext_forces) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
//ext_forces[0*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[0];
//ext_forces[1*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
//ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
 ext_forces[ index] = d_LBparams.extForceDensity[0];
ext_forces[ d_LBparams.numNodes + index] = d_LBparams.extForceDensity[1];
ext_forces[2*d_LBparams.numNodes + index] = d_LBparams.extForceDensity[2];
}
}
__global__ void calc_n_from_rho_j_pi (float *ext_forces, float *n_a, LB_rho_v *d_v) {
/* TODO: this can handle only a uniform density, something similar, but local,
has to be called every time the fields are set by the user ! */
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes)
{
float mode[19];
float Rho = d_LBparams.rho;
float v[3] = {0.0f, 0.0f, 0.0f};
float pi[6] = {Rho*c_sound_sq, 0.0f, Rho*c_sound_sq, 0.0f, 0.0f, Rho*c_sound_sq};
float rhoc_sq = Rho * c_sound_sq;
float avg_rho = d_LBparams.rho;
float local_rho, local_j[3], *local_pi, trace;
local_rho = Rho;
local_j[0] = Rho * v[0];
local_j[1] = Rho * v[1];
local_j[2] = Rho * v[2];
local_pi = pi;
/** reduce the pressure tensor to the part needed here.
 NOTE: this is not true anymore for SHANCHEN
if the densities are not uniform. FIXME*/
local_pi[0] -= rhoc_sq;
local_pi[2] -= rhoc_sq;
local_pi[5] -= rhoc_sq;
trace = local_pi[0] + local_pi[2] + local_pi[5];
float rho_times_coeff;
float tmp1,tmp2;
/** update the q=0 sublattice */
n_a[0 * d_LBparams.numNodes + index] = 1.0f/3.0f * (local_rho-avg_rho) - 1.0f/2.0f*trace;
/** update the q=1 sublattice */
rho_times_coeff = 1.0f/18.0f * (local_rho-avg_rho);
n_a[1 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[0] + 1.0f/4.0f*local_pi[0] - 1.0f/12.0f*trace;
n_a[2 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[0] + 1.0f/4.0f*local_pi[0] - 1.0f/12.0f*trace;
n_a[3 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[1] + 1.0f/4.0f*local_pi[2] - 1.0f/12.0f*trace;
n_a[4 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[1] + 1.0f/4.0f*local_pi[2] - 1.0f/12.0f*trace;
n_a[5 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/6.0f*local_j[2] + 1.0f/4.0f*local_pi[5] - 1.0f/12.0f*trace;
n_a[6 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/6.0f*local_j[2] + 1.0f/4.0f*local_pi[5] - 1.0f/12.0f*trace;
/** update the q=2 sublattice */
rho_times_coeff = 1.0f/36.0f * (local_rho-avg_rho);
tmp1 = local_pi[0] + local_pi[2];
tmp2 = 2.0f*local_pi[1];
n_a[ 7 * d_LBparams.numNodes+index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]+local_j[1]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[ 8 * d_LBparams.numNodes+index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]+local_j[1]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[ 9 * d_LBparams.numNodes+index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]-local_j[1]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[10 * d_LBparams.numNodes+index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]-local_j[1]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
tmp1 = local_pi[0] + local_pi[5];
tmp2 = 2.0f*local_pi[3];
n_a[11 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[12 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[13 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[0]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[14 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[0]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
tmp1 = local_pi[2] + local_pi[5];
tmp2 = 2.0f*local_pi[4];
n_a[15 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[1]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[16 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[1]+local_j[2]) + 1.0f/8.0f*(tmp1+tmp2) - 1.0f/24.0f*trace;
n_a[17 * d_LBparams.numNodes + index] = rho_times_coeff + 1.0f/12.0f*(local_j[1]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
n_a[18 * d_LBparams.numNodes + index] = rho_times_coeff - 1.0f/12.0f*(local_j[1]-local_j[2]) + 1.0f/8.0f*(tmp1-tmp2) - 1.0f/24.0f*trace;
/**set different seed for randomgen on every node */
// n_a.seed[index] = para.your_seed + index;
calc_m_from_n (index, n_a, mode);
update_rho_v (index, mode, ext_forces, d_v);
}
}
__global__ void integrate (float *n_a, float *ext_forces, LB_rho_v *d_v, float *n_b) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
float mode[19]; // the 19 moments (modes) are only temporary register values
if (index < d_LBparams.numNodes) {
calc_m_from_n (index, n_a, mode);
relax_modes (index, ext_forces, d_v, mode);
apply_forces (index, ext_forces, d_v, mode);
normalize_modes (mode);
calc_n_from_modes_push (index, mode, n_b);
}
}
__global__ void apply_boundaries (unsigned int *boundary_map, float *boundary_velocities, float *n_curr, float *boundary_forces) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
bounce_back_boundaries (index, boundary_map, boundary_velocities, n_curr, boundary_forces);
}
}
__global__ void get_mesoscopic_values_in_LB_units (float *n_a, unsigned int *boundary_map, float *ext_forces, LB_rho_v *d_v, LB_rho_v_pi *d_p_v) {
unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (index < d_LBparams.numNodes) {
float mode[19];
calc_m_from_n (index, n_a, mode);
calc_values_in_LB_units (index, index, boundary_map, mode, ext_forces, d_v, d_p_v);
}
}
extern "C"
void Initialize_LB_Parameters_dev (LBparameters *h_LBparams) {
cuda_safe_mem(cudaMemcpyToSymbol(d_LBparams, h_LBparams, sizeof(LBparameters)));
}
extern "C"
void PrintDevParasWrapper () {
Print_dev_LB_Params<<<1,1>>>();
PrintDevVariable <<<1,1>>> ();
}
extern "C"
void InitializeLB () {
#define free_realloc_and_clear(var, size) \
{ \
if ((var) != NULL) cudaFree((var)); \
cuda_safe_mem(cudaMalloc((void**)&var, size)); \
cudaMemset(var, 0, size); \
}
cuda_safe_mem(cudaMemcpyToSymbol(d_LBparams, &h_LBparams, sizeof(LBparameters)));
free_realloc_and_clear(devNodesA, h_LBparams.numNodes * 19 * sizeof(float));
free_realloc_and_clear(devNodesB, h_LBparams.numNodes * 19 * sizeof(float));
free_realloc_and_clear(devBoundaryMap, h_LBparams.numNodes * sizeof(unsigned int));
free_realloc_and_clear(devExtForces, h_LBparams.numNodes * 3 * sizeof(float));
free_realloc_and_clear(devBoundaryForces, h_LBparams.numBoundaries*3*sizeof(float));
free_realloc_and_clear(devBoundaryVelocities, h_LBparams.numBoundaries*3*sizeof(float));
size_of_rho_v = h_LBparams.numNodes * sizeof(LB_rho_v);
size_of_rho_v_pi = h_LBparams.numNodes * sizeof(LB_rho_v_pi);
free_realloc_and_clear(devRhoV, size_of_rho_v);
 /* TODO: this is almost a copy of device_rho_v; think about eliminating it, and maybe pi can be added to device_rho_v in this case */
free_realloc_and_clear(print_rho_v_pi, size_of_rho_v_pi);
// Note: discarded design
///**check flag if lb gpu init works*/
//free_and_realloc(gpu_check, sizeof(int));
//if (h_gpu_check != NULL)
// free (h_gpu_check);
//h_gpu_check = (int*)malloc(sizeof(int));
//h_gpu_check[0] = 0;
//hostExtForces = (float *)calloc(h_LBparams.numNodes*3, sizeof(float)); // memory needs to be released at the end !
// values for the kernel call
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3(blocks_per_grid_x, blocks_per_grid_y, 1);
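  // Note (added): the 2D grid only keeps blocks_per_grid_x below the hardware
  // limit; inside the kernels the flattened index
  //   index = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x
  // enumerates the nodes, and threads with index >= numNodes simply do nothing.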
KERNELCALL(InitializeBoundaryMap, dim_grid, threads_per_block, (devBoundaryMap));
//cuda_safe_mem(cudaMemcpy(devBoundaryVelocities, hostBoundaryVelocities, 3*h_LBparams.numBoundaries*sizeof(float), cudaMemcpyHostToDevice));
KERNELCALL(InitializeBodyForces, dim_grid, threads_per_block, (devExtForces));
KERNELCALL(calc_n_from_rho_j_pi, dim_grid, threads_per_block, (devExtForces, devNodesA, devRhoV));
intflag = 1;
devCurrentNodes = devNodesA;
// Note: discarded design
//cuda_safe_mem(cudaMemcpy (h_gpu_check, gpu_check, sizeof(int), cudaMemcpyDeviceToHost));
//fprintf(stderr, "initialization of lb gpu code %i\n", h_LBparams.numNodes);
cudaDeviceSynchronize();
// Note: discarded design
//#if __CUDA_ARCH__ >= 200
//if(!h_gpu_check[0])
//{
// fprintf(stderr, "initialization of lb gpu code failed! \n");
// errexit();
//}
//#endif
}
extern "C"
void UpdateLBE () {
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1);
// call of fluid step
/* NOTE: if pi is needed at every integration step, one should call an extended version
of the integrate kernel, or pass also devRhoV_pi and make sure that either
it or devRhoV are NULL depending on extended_values_flag */
if (intflag == 1) {
KERNELCALL(integrate, dim_grid, threads_per_block, (devNodesA, devExtForces, devRhoV, devNodesB));
devCurrentNodes = devNodesB;
intflag = 0;
} else {
KERNELCALL(integrate, dim_grid, threads_per_block, (devNodesB, devExtForces, devRhoV, devNodesA));
devCurrentNodes = devNodesA;
intflag = 1;
}
if (h_LBparams.numBoundaries > 0) {
// Version 1: can assign a velocity value to each boundary
//SetBoundaryVelocities ();
//KERNELCALL(apply_boundaries, dim_grid, threads_per_block, (devBoundaryMap, devBoundaryVelocities, devCurrentNodes, devBoundaryForces));
// Version 2: only allow walls in the y direction to move
KERNELCALL(apply_boundaries, dim_grid, threads_per_block, (devBoundaryMap, devBoundaryVelocities, devCurrentNodes, devBoundaryForces));
}
}
extern "C"
void SetBoundaryVelocities () {
float *hostBoundaryVelocities;
hostBoundaryVelocities = (float *) calloc (h_LBparams.numBoundaries*3, sizeof(float));
hostBoundaryVelocities[0*3+0] = -0.5*h_LBparams.boundaryVelocity[0];
hostBoundaryVelocities[0*3+1] = 0.0;
hostBoundaryVelocities[0*3+2] = 0.0;
hostBoundaryVelocities[1*3+0] = 0.5*h_LBparams.boundaryVelocity[0];
hostBoundaryVelocities[1*3+1] = 0.0;
hostBoundaryVelocities[1*3+2] = 0.0;
cuda_safe_mem(cudaMemcpy(devBoundaryVelocities, hostBoundaryVelocities, 3*h_LBparams.numBoundaries*sizeof(float), cudaMemcpyHostToDevice));
free (hostBoundaryVelocities);
}
extern "C"
void lb_get_values_GPU (LB_rho_v_pi *host_values) {
// values for the kernel call
int threads_per_block = 64;
int blocks_per_grid_y = 4;
int blocks_per_grid_x = (h_LBparams.numNodes + threads_per_block * blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y);
dim3 dim_grid = make_uint3(blocks_per_grid_x, blocks_per_grid_y, 1);
KERNELCALL(get_mesoscopic_values_in_LB_units, dim_grid, threads_per_block, (devCurrentNodes, devBoundaryMap, devExtForces, devRhoV, print_rho_v_pi));
cuda_safe_mem(cudaMemcpy(host_values, print_rho_v_pi, size_of_rho_v_pi, cudaMemcpyDeviceToHost));
}
extern "C"
void PrintFluidVelocitiesVTK (char *filePath) {
LB_rho_v_pi *host_values;
size_t size_of_values = h_LBparams.numNodes * sizeof(LB_rho_v_pi);
host_values = (LB_rho_v_pi*) malloc (size_of_values);
lb_get_values_GPU (host_values);
FILE* fp = fopen (filePath, "w");
fprintf (fp, "# vtk DataFile Version 2.0\nlbfluid_gpu\n"
"ASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS %u %u %u\n"
"ORIGIN %f %f %f\nSPACING %f %f %f\nPOINT_DATA %u\n"
"SCALARS velocity float 3\nLOOKUP_TABLE default\n",
h_LBparams.dimX, h_LBparams.dimY, h_LBparams.dimZ,
h_LBparams.agrid*0.5, h_LBparams.agrid*0.5, h_LBparams.agrid*0.5,
h_LBparams.agrid, h_LBparams.agrid, h_LBparams.agrid,
h_LBparams.numNodes);
for(int j=0; j < int(h_LBparams.numNodes); ++j) {
fprintf (fp, "%f %f %f\n", host_values[j].v[0], host_values[j].v[1], host_values[j].v[2]);
}
fclose (fp);
free (host_values);
}
|
5308fe4a048e37f4fd5ba63162331eaf25356b60.hip | // !!! This is a file automatically generated by hipify!!!
/* * COMPUTER ARCHITECTURE
* 2nd year, Computer Engineering degree
* Academic year 2020/21
*
* ASSIGNMENT no.5 GPU vs CPU performance
*
* TEAM: G6 ARCO 103
* MEMBERS: Gonzalez Martinez Sergio
* Arnaiz Lopez Lucia
* San Martin Liendo Alvar
*
*/ /////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <thread>
#ifdef __linux__
#include <sys/time.h>
typedef struct timeval event;
#else
#include <windows.h>
typedef LARGE_INTEGER event;
#endif
///////////////////////////////////////////////////////////////////////////
void hiloRango(int* arrOrg, int* arrDest, int n, int id, int nums);
// function declarations
// HOST: function called from the host and executed on the host
__host__ void setEvent(event* ev)
/* Description: Generates a time event */
{
#ifdef __linux__
gettimeofday(ev, NULL);
#else
QueryPerformanceCounter(ev);
#endif
}
__host__ double eventDiff(event* first, event* last)
/* Description: Returns the time difference (in ms) between two events */
{
#ifdef __linux__
return
((double)(last->tv_sec + (double)last->tv_usec / 1000000) -
(double)(first->tv_sec + (double)first->tv_usec / 1000000)) * 1000.0;
#else
event freq;
QueryPerformanceFrequency(&freq);
return ((double)(last->QuadPart - first->QuadPart) / (double)freq.QuadPart) * 1000.0;
#endif
}
__global__ void ordenarPorRango(int* arrOrg, int* arrDest, int n) {
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int rango = 0;
    if (myID < n) {
        int valor = arrOrg[myID];
        for (int i = 0; i < n; i++) {
            if (valor > arrOrg[i] || (valor == arrOrg[i] && i > myID)) {
                rango++;
            }
        }
        //printf("Rank of %i = %i\n", valor, rango);
        // Guard the read and the write so threads with myID >= n never touch out-of-range memory
        arrDest[rango] = valor;
    }
}
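// Worked example (added): for arrOrg = {5, 2, 5} each thread counts how many
// elements are smaller than its own (ties broken by index), giving ranks 2, 0, 1,
// so arrDest becomes {2, 5, 5}, i.e. an ascending, stable rank sort.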
__host__ void ordenarPorRangoCPU(int* arrOrg, int* arrDest, int n) {
 // The thread id is replaced by the loop index j
int rango = 0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < n; i++) {
if (arrOrg[j] > arrOrg[i] || (arrOrg[j] == arrOrg[i] && i > j)) {
rango++;
}
}
arrDest[rango] = arrOrg[j];
rango = 0;
}
}
// orden == power of 2 up to which to compute
double calcularCPU(int orden) {
int* hst_A, * hst_B;
int len = pow(2, orden);
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
hst_B = (int*)malloc(len * sizeof(float));
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
 // The eventDiff() function computes the time difference (in milliseconds) between two events.
 event start; // variable to store the initial time event.
 event stop; // variable to store the final time event.
 double t_ms;
 // Start the timer
 setEvent(&start); // initial event mark
ordenarPorRangoCPU(hst_A, hst_B, len);
 // Stop the timer
 setEvent(&stop); // final event mark
 // Time intervals
 t_ms = eventDiff(&start, &stop); // time difference in ms
 //printf("On the CPU with %i values it takes %lf\n", len, t_ms);
return t_ms;
}
// orden == power of 2 up to which to compute
double calcularGPU(int orden) {
int len = pow(2, orden);
int* hst_A, * dev_A, * dev_B;
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
hipMalloc((void**)&dev_B, len * sizeof(int));
hipMalloc((void**)&dev_A, len * sizeof(int));
 // Number of blocks
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int cores = deviceProp.maxThreadsPerBlock;
 int bloques = (len + cores - 1) / cores; // integer ceiling; ceil(len / cores) truncates to 0 for small arrays
 // event declaration
hipEvent_t startDev;
hipEvent_t stopDev;
 // event creation
hipEventCreate(&startDev);
hipEventCreate(&stopDev);
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
hipMemcpy(dev_A, hst_A, len * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(startDev, 0);
ordenarPorRango << <bloques, cores >> > (dev_A, dev_B, len);
hipEventRecord(stopDev, 0);
 // GPU-CPU synchronization
hipEventSynchronize(stopDev);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, startDev, stopDev);
//printf("En GPU con %i valores tarda %lf\n", len, elapsedTime);
return (double)elapsedTime;
}
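// Note (added): dev_A, dev_B and hst_A are not released here; a fuller version
// would call hipFree(dev_A); hipFree(dev_B); hipEventDestroy(startDev);
// hipEventDestroy(stopDev); and free(hst_A); before returning.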
double calcularCPU_Hilos(int orden) {
int* hst_A, * hst_B;
int len = pow(2, orden);
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
hst_B = (int*)malloc(len * sizeof(float));
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
 // The eventDiff() function computes the time difference (in milliseconds) between two events.
 event start; // variable to store the initial time event.
 event stop; // variable to store the final time event.
 double t_ms;
const int nucleos = std::thread::hardware_concurrency();
std::thread* hilos = new std::thread[nucleos];
 // how many numbers each thread handles
int nums = ceil((float)len / nucleos);
 // Start the timer
 setEvent(&start); // initial event mark
for (int i = 0; i < nucleos; i++) {
hilos[i] = std::thread(hiloRango, hst_A, hst_B, len, i, nums);
}
for (int i = 0; i < nucleos; i++) {
hilos[i].join();
}
 // Stop the timer
 setEvent(&stop); // final event mark
//for (int i = 0; i < len; i++) {
// printf("%i ",hst_B[i]);
//}
//printf("\n");
 // Time intervals
 t_ms = eventDiff(&start, &stop); // time difference in ms
 //printf("On the CPU with %i values it takes %lf\n", len, t_ms);
return t_ms;
}
void hiloRango(int* arrOrg, int* arrDest, int n, int id, int nums) {
int rango = 0;
for (int j = (id * nums); j < ((id + 1) * nums) && j < n; j++) {
for (int i = 0; i < n; i++) {
if (arrOrg[j] > arrOrg[i] || (arrOrg[j] == arrOrg[i] && i > j)) {
rango++;
}
}
arrDest[rango] = arrOrg[j];
rango = 0;
}
}
///////////////////////////////////////////////////////////////////////////
// MAIN: main routine executed on the host
int main(int argc, char** argv)
{
int orden;
const unsigned char OFFSET = 5;
printf("Hasta que potencia quieres calcular (desde 2^5 (32))[1-n]: ");
scanf("%i", &orden);
double* tiemposCPUHilos = (double*)malloc(orden * sizeof(double));
double* tiemposCPU = (double*)malloc(orden * sizeof(double));
double* tiemposGPU = (double*)malloc(orden * sizeof(double));
for (int i = 0; i < orden; i++) {
tiemposCPU[i] = calcularCPU((i + OFFSET));
tiemposGPU[i] = calcularGPU((i + OFFSET));
tiemposCPUHilos[i] = calcularCPU_Hilos((i + OFFSET));
}
 /// THREADS ///
////////////
int potencia;
printf(" N ");
for (int i = 0; i < orden; i++) {
potencia = pow(2, (i + OFFSET));
printf(" %d ", potencia);
}
printf("\n");
printf(" GPU ");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposGPU[i]);
}
printf("\n");
printf(" CPU ");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposCPU[i]);
}
printf("\n");
printf("CPU+Hilos");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposCPUHilos[i]);
}
printf("\n");
 // exit
 printf("\npress ENTER to finish...");
fflush(stdin);
getchar();
return 0;
} | 5308fe4a048e37f4fd5ba63162331eaf25356b60.cu |
/* * COMPUTER ARCHITECTURE
* 2nd year, Computer Engineering degree
* Academic year 2020/21
*
* ASSIGNMENT no.5 GPU vs CPU performance
*
* TEAM: G6 ARCO 103
* MEMBERS: Gonzalez Martinez Sergio
* Arnaiz Lopez Lucia
* San Martin Liendo Alvar
*
*/ /////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <thread>
#ifdef __linux__
#include <sys/time.h>
typedef struct timeval event;
#else
#include <windows.h>
typedef LARGE_INTEGER event;
#endif
///////////////////////////////////////////////////////////////////////////
void hiloRango(int* arrOrg, int* arrDest, int n, int id, int nums);
// function declarations
// HOST: function called from the host and executed on the host
__host__ void setEvent(event* ev)
/* Description: Generates a time event */
{
#ifdef __linux__
gettimeofday(ev, NULL);
#else
QueryPerformanceCounter(ev);
#endif
}
__host__ double eventDiff(event* first, event* last)
/* Description: Returns the time difference (in ms) between two events */
{
#ifdef __linux__
return
((double)(last->tv_sec + (double)last->tv_usec / 1000000) -
(double)(first->tv_sec + (double)first->tv_usec / 1000000)) * 1000.0;
#else
event freq;
QueryPerformanceFrequency(&freq);
return ((double)(last->QuadPart - first->QuadPart) / (double)freq.QuadPart) * 1000.0;
#endif
}
__global__ void ordenarPorRango(int* arrOrg, int* arrDest, int n) {
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int rango = 0;
    if (myID < n) {
        int valor = arrOrg[myID];
        for (int i = 0; i < n; i++) {
            if (valor > arrOrg[i] || (valor == arrOrg[i] && i > myID)) {
                rango++;
            }
        }
        //printf("Rank of %i = %i\n", valor, rango);
        // Guard the read and the write so threads with myID >= n never touch out-of-range memory
        arrDest[rango] = valor;
    }
}
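// Worked example (added): for arrOrg = {5, 2, 5} each thread counts how many
// elements are smaller than its own (ties broken by index), giving ranks 2, 0, 1,
// so arrDest becomes {2, 5, 5}, i.e. an ascending, stable rank sort.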
__host__ void ordenarPorRangoCPU(int* arrOrg, int* arrDest, int n) {
 // The thread id is replaced by the loop index j
int rango = 0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < n; i++) {
if (arrOrg[j] > arrOrg[i] || (arrOrg[j] == arrOrg[i] && i > j)) {
rango++;
}
}
arrDest[rango] = arrOrg[j];
rango = 0;
}
}
// orden == power of 2 up to which to compute
double calcularCPU(int orden) {
int* hst_A, * hst_B;
int len = pow(2, orden);
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
hst_B = (int*)malloc(len * sizeof(float));
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
 // The eventDiff() function computes the time difference (in milliseconds) between two events.
 event start; // variable to store the initial time event.
 event stop; // variable to store the final time event.
 double t_ms;
 // Start the timer
 setEvent(&start); // initial event mark
ordenarPorRangoCPU(hst_A, hst_B, len);
 // Stop the timer
 setEvent(&stop); // final event mark
 // Time intervals
 t_ms = eventDiff(&start, &stop); // time difference in ms
 //printf("On the CPU with %i values it takes %lf\n", len, t_ms);
return t_ms;
}
// orden == power of 2 up to which to compute
double calcularGPU(int orden) {
int len = pow(2, orden);
int* hst_A, * dev_A, * dev_B;
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
cudaMalloc((void**)&dev_B, len * sizeof(int));
cudaMalloc((void**)&dev_A, len * sizeof(int));
 // Number of blocks
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int cores = deviceProp.maxThreadsPerBlock;
 int bloques = (len + cores - 1) / cores; // integer ceiling; ceil(len / cores) truncates to 0 for small arrays
 // event declaration
cudaEvent_t startDev;
cudaEvent_t stopDev;
 // event creation
cudaEventCreate(&startDev);
cudaEventCreate(&stopDev);
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
cudaMemcpy(dev_A, hst_A, len * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(startDev, 0);
ordenarPorRango << <bloques, cores >> > (dev_A, dev_B, len);
cudaEventRecord(stopDev, 0);
 // GPU-CPU synchronization
cudaEventSynchronize(stopDev);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, startDev, stopDev);
//printf("En GPU con %i valores tarda %lf\n", len, elapsedTime);
return (double)elapsedTime;
}
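// Note (added): dev_A, dev_B and hst_A are not released here; a fuller version
// would call cudaFree(dev_A); cudaFree(dev_B); cudaEventDestroy(startDev);
// cudaEventDestroy(stopDev); and free(hst_A); before returning.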
double calcularCPU_Hilos(int orden) {
int* hst_A, * hst_B;
int len = pow(2, orden);
 // allocation on the host
hst_A = (int*)malloc(len * sizeof(float));
hst_B = (int*)malloc(len * sizeof(float));
 // initialization
srand((int)time(NULL));
for (int i = 0; i < len; i++)
{
hst_A[i] = rand() % 51;
}
 // The eventDiff() function computes the time difference (in milliseconds) between two events.
 event start; // variable to store the initial time event.
 event stop; // variable to store the final time event.
 double t_ms;
const int nucleos = std::thread::hardware_concurrency();
std::thread* hilos = new std::thread[nucleos];
 // how many numbers each thread handles
int nums = ceil((float)len / nucleos);
 // Start the timer
 setEvent(&start); // initial event mark
for (int i = 0; i < nucleos; i++) {
hilos[i] = std::thread(hiloRango, hst_A, hst_B, len, i, nums);
}
for (int i = 0; i < nucleos; i++) {
hilos[i].join();
}
 // Stop the timer
 setEvent(&stop); // final event mark
//for (int i = 0; i < len; i++) {
// printf("%i ",hst_B[i]);
//}
//printf("\n");
 // Time intervals
 t_ms = eventDiff(&start, &stop); // time difference in ms
 //printf("On the CPU with %i values it takes %lf\n", len, t_ms);
return t_ms;
}
void hiloRango(int* arrOrg, int* arrDest, int n, int id, int nums) {
int rango = 0;
for (int j = (id * nums); j < ((id + 1) * nums) && j < n; j++) {
for (int i = 0; i < n; i++) {
if (arrOrg[j] > arrOrg[i] || (arrOrg[j] == arrOrg[i] && i > j)) {
rango++;
}
}
arrDest[rango] = arrOrg[j];
rango = 0;
}
}
///////////////////////////////////////////////////////////////////////////
// MAIN: main routine executed on the host
int main(int argc, char** argv)
{
int orden;
const unsigned char OFFSET = 5;
printf("Hasta que potencia quieres calcular (desde 2^5 (32))[1-n]: ");
scanf("%i", &orden);
double* tiemposCPUHilos = (double*)malloc(orden * sizeof(double));
double* tiemposCPU = (double*)malloc(orden * sizeof(double));
double* tiemposGPU = (double*)malloc(orden * sizeof(double));
for (int i = 0; i < orden; i++) {
tiemposCPU[i] = calcularCPU((i + OFFSET));
tiemposGPU[i] = calcularGPU((i + OFFSET));
tiemposCPUHilos[i] = calcularCPU_Hilos((i + OFFSET));
}
 /// THREADS ///
////////////
int potencia;
printf(" N ");
for (int i = 0; i < orden; i++) {
potencia = pow(2, (i + OFFSET));
printf(" %d ", potencia);
}
printf("\n");
printf(" GPU ");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposGPU[i]);
}
printf("\n");
printf(" CPU ");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposCPU[i]);
}
printf("\n");
printf("CPU+Hilos");
for (int i = 0; i < orden; i++) {
printf("%3.8lf ", tiemposCPUHilos[i]);
}
printf("\n");
 // exit
 printf("\npress ENTER to finish...");
fflush(stdin);
getchar();
return 0;
} |
95deea863b495210a321e633baf32a373225ba53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlobpcg_residuals.cu normal z -> c, Fri Jan 30 19:00:29 2015
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// copied from scnrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
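// Illustrative usage sketch (added): inside a kernel launched with BLOCK_SIZE
// threads, a per-thread partial sum placed in shared memory is reduced with
//   __shared__ float sum[ BLOCK_SIZE ];
//   sum[threadIdx.x] = partial;
//   sum_reduce< BLOCK_SIZE >( threadIdx.x, sum );   // total ends up in sum[0]
// (see the commented-out magmablas_scnrm2_kernel below for the same pattern).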
__global__ void
magma_clobpcg_res_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evals,
magmaFloatComplex * X,
magmaFloatComplex * R,
magmaFloat_ptr res)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_C_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_scnrm2_kernel(
int m,
magmaFloatComplex * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
 This routine computes, for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_caxpy(m, MAGMA_C_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloatComplex_ptr
block of eigenvector approximations
@param[in]
R magmaFloatComplex_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloatComplex_ptr X,
magmaFloatComplex_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
dim3 grid( (num_rows+block_size-1)/block_size, 1, 1 );
hipLaunchKernelGGL(( magma_clobpcg_res_kernel), dim3(grid), dim3(threads), 0, queue ,
num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
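// Usage sketch (illustration only, not from the original source; d_evals, d_X, d_R, d_res and
// queue are placeholder names): with the eigenvalue array and the blocks X and R already on the
// device, the residual update R(:,i) -= evals[i] * X(:,i) for every vector i is a single call:
//
//     magma_clobpcg_res( num_rows, num_vecs, d_evals, d_X, d_R, d_res, queue );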
| 95deea863b495210a321e633baf32a373225ba53.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlobpcg_residuals.cu normal z -> c, Fri Jan 30 19:00:29 2015
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// copied from scnrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_clobpcg_res_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evals,
magmaFloatComplex * X,
magmaFloatComplex * R,
magmaFloat_ptr res)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_C_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_scnrm2_kernel(
int m,
magmaFloatComplex * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
This routine computes for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_caxpy(m, MAGMA_C_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloatComplex_ptr
block of eigenvector approximations
@param[in]
R magmaFloatComplex_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloatComplex_ptr X,
magmaFloatComplex_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
dim3 grid( (num_rows+block_size-1)/block_size, 1, 1 );
magma_clobpcg_res_kernel<<< grid, threads, 0, queue >>>
( num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
|
38b8d85725e8e97b4367f61c15679e875a83214e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <conio.h>
#include <iostream>
#include <vector>
#include <chrono>
#include <math.h>
#include <string>
/**********************************************************************************
* Super-fast parallel Sieve of Eratosthenes v2.0
* Building the program requires NVIDIA's dedicated compiler (nvcc)
* Running the parallel version of the algorithm requires an NVIDIA graphics card
* A description of the algorithm is on OneDrive
* The algorithm is at version 1.0 (main limitation: maximum N is about 1 billion)
*///*******************************************************************************
#define AutoThreads
//#define Default
//#define DEBUG
#define DefaultSectorNum 16
#define ThreadsPerBlock 512
#define DefaultNum (1<<10)
#define DefaultToShow 1000
#define big unsigned long long int
#define uint unsigned int
const uint IntSize = (sizeof(int) * 8);
using namespace std;
using namespace std::chrono;
void GetError(hipError_t Err) {
cout << "WYSTAPIL BLAD: " << hipGetErrorString(Err) << endl;
}
//GPU helper functions for bit operations (read the pos-th bit of array a)
__device__ inline bool pos(int* a, uint pos) {
return (a[pos / 32] >> (pos % 32)) & 1;
}
//GPU helper functions for bit operations (set the pos-th bit of array a)
__device__ inline void set(int* a, uint pos, bool val = true) {
if (val) { //sets the pos-th bit of array a to the value val
atomicOr(&a[pos / 32], (1 << (pos % 32)));
}
else {
atomicAnd(&a[pos / 32], ~((int)(1 << (pos % 32))));
}
}
//The same functions, but for the CPU
inline bool Pos(int* a, big pos) {
return (a[pos / IntSize] >> (pos % IntSize)) & 1;
}
inline void Set(int* a, big pos, bool val) {
if (val) {
a[pos / IntSize] |= (1 << (pos % IntSize));
}
else {
a[pos / IntSize] &= ~((char)(1 << (pos % IntSize)));
}
}
struct Modes {
big Algoritm;
big num;
bool GPU = false;
bool CPU = false;
big toShow;
bool ShowLast;
bool ShowinRows;
};
//CPU IMPLEMENTATIONS OF THE ALGORITHMS + an optimized boolean array
class BinaryArry2d
{
char* Data;
big AllocSize;
big Size;
public:
BinaryArry2d(const big sizeX) {
big rX = (sizeX - 1) / 8 + 1;
Data = new char[rX];
AllocSize = rX;
Size = sizeX;
}
void Fill(bool fill) {
if (fill) {
for (int i = 0; i < AllocSize; i++) {
Data[i] = 255;
}
}
else {
for (int i = 0; i < AllocSize; i++) {
Data[i] = 0;
}
}
}
BinaryArry2d& operator ^= (big x) {
Data[x / 8] ^= (1 << (x % 8));
return *this;
}
bool operator[](big x) {
return (Data[x / 8] >> (x % 8)) & 1;
}
void set(big x, bool type) {
if (type) {
Data[x / 8] |= (1 << (x % 8));
}
else {
Data[x / 8] &= ~((char)(1 << (x % 8)));
}
}
uint size() {
return Size;
}
uint allocSize() {
return AllocSize;
}
~BinaryArry2d() {
delete[] Data;
}
};
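// Short usage sketch of the bit array above (added for illustration, not part of the original program):
/*
void binary_array_demo() {
	BinaryArry2d bits(10); // 10 logical bits packed into (10-1)/8+1 = 2 bytes
	bits.Fill(false); // clear every bit
	bits.set(3, true); // bits[3] is now true
	bits ^= 3; // toggle bit 3 back to false
	bool b = bits[3]; // read access through operator[]
	(void)b;
}
*/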
void SieveOfAtkin(big num, BinaryArry2d &Array) {
Array.Fill(false);
Array.set(2, true);
Array.set(3, true);
Array.set(5, true);
big sqrtnum = sqrt(num + 1);
big n, mod;
for (big x = 1; x <= sqrtnum; x++)
{
for (big y = 1; y <= sqrtnum; y += 2)
{
n = 4 * x*x + y * y;
if (n <= num) {
mod = n % 60;
if (mod == 1 || mod == 13 || mod == 17 || mod == 29 || mod == 37 || mod == 41 || mod == 49 || mod == 53) {
Array ^= n;
}
}
}
}
for (big x = 1; x <= sqrtnum; x += 2)
{
for (big y = 2; y <= sqrtnum; y += 2)
{
n = 3 * x*x + y * y;
if (n <= num) {
mod = n % 60;
if (mod == 7 || mod == 19 || mod == 31 || mod == 43) {
Array ^= n;
}
}
}
}
for (big x = 2; x <= sqrtnum; x++)
{
for (big y = 1; y <= x - 1; y++)
{
n = 3 * x*x - y * y;
if (n <= num) {
mod = n % 60;
if (mod == 11 || mod == 23 || mod == 47 || mod == 59) {
Array ^= n;
}
}
}
}
for (big i = 5; i <= sqrtnum; i++)
{
if (Array[i])
{
for (big j = i * i; j <= num; j += i)
{
Array.set(j, false);
}
}
}
}
void SieveOfSundaram(big n, BinaryArry2d & Array)
{
Array.Fill(false);
const uint nNew = (n - 2) / 2;
uint pos = 0;
for (uint i = 1; i <= nNew; i++) {
for (uint j = i; (i + j + 2 * i*j) <= nNew; j++) {
pos = i + j + 2 * i*j;
Array.set(pos, true);
}
}
}
void SieveOfEratostenes(big n, BinaryArry2d & Array) {
uint sqrtn = sqrt(n);
Array.Fill(true);
for (uint i = 2; i <= sqrtn; i++) {
if (Array[i]) {
for (int j = i * i; j <= n; j += i) {
Array.set(j, false);
}
}
}
}
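// Minimal CPU-side sketch (an added illustration; Sito_CPU below does the same with timing and
// settings): running one of the sieves and reading the primes back out of the bit array.
/*
void print_primes_up_to_30() {
	const big n = 30;
	BinaryArry2d primes(n + 1);
	SieveOfEratostenes(n, primes); // afterwards primes[i] == true <=> i is prime (for i >= 2)
	for (big i = 2; i <= n; i++)
		if (primes[i]) cout << i << " "; // 2 3 5 7 11 13 17 19 23 29
	cout << endl;
}
*/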
//#############################################################
//# #
//# GPU functions #
//# #
//#############################################################
//SET TABLE a TO ALL FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF...
__global__ void memoryset(int *a, big num, const big AllocSize)
{
uint tid = blockDim.x*blockIdx.x + threadIdx.x;
const uint move = blockDim.x * gridDim.x;
while (tid < AllocSize) {
a[tid] = -1;
tid += move;
}
}
//CONCURRENTLY REMOVE ALL MULTIPLES OF x FROM TABLE a
__global__ void CutMultiples(int *a, const uint xx, const uint x, const uint Num)
{
uint tid = threadIdx.x + blockIdx.x*blockDim.x;
const uint move = blockDim.x * gridDim.x;
uint Pos = xx + x * tid;
while (Pos <= Num) {
set(a, Pos, false);
tid += move;
Pos = xx + x * tid;
}
}
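// Launch sketch for the two kernels above (added note): the grid-stride loops let any
// <blocks, threads> configuration cover the whole bit table. For a hypothetical table of `num`
// bits stored in `alloc` ints:
//
//     memoryset<<< blocks, threads >>>(gpu_a, num, alloc);      // set every bit to 1
//     CutMultiples<<< blocks, threads >>>(gpu_a, 3*3, 3, num);  // clear bits 9, 12, 15, ... <= num
//
// (in this HIP build the calls are spelled with hipLaunchKernelGGL, as in Sito_GPU below)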
unsigned big Sito_GPUv2(const unsigned big num, vector<uint>&Array, Modes Settings) {
big BufNum;
#ifndef DefaultSectorNum
for (uint i = 0; i < 16; i++)
{
BufNum = 1 << i;
if (num / BufNum <= 1073741824) {
break;
}
}
#else
BufNum = DefaultSectorNum;
#endif // DefaultSectorSize
auto ts = high_resolution_clock::now(); //chrono - mierzenie czasu
const uint sqrtNum = ceil(sqrt(num));
hipError_t Error; //Wylapywanie bledow CUDA'Y
hipStream_t* Streams = new hipStream_t[BufNum];
const big AllocSize = (num) / IntSize + 1; //ROZMIAR ALOKACJI NA CPU
const big BuffAllocSize = (num / BufNum) / IntSize + 1;//ROZMIAR ALOKACJI SEKTORA
const big BuffSize = (num / BufNum);//ROZMIAR 1 SEKTORA
hipDeviceProp_t GPU; //Wlasciwosci GPU
//Init
hipGetDeviceProperties(&GPU, 0);
for (int i = 0; i < BufNum; i++)
{
hipStreamCreate(&(Streams[i]));
}
//Alloc
int *a = new int[AllocSize];
int *gpu_a;
Error = hipMalloc((void**)&gpu_a, AllocSize * sizeof(int));
if (Error != hipSuccess) {
cout << "Za malo VRAMU" << endl;
free(a);
hipFree(gpu_a);
return false;
}
//Wypelnianie tabeli FFFFFFFFFFFFF
for (big i = 0; i < AllocSize; i++)
{
a[i] = -1;
}
//Przygotowywanie siatki
#ifdef AutoThreads
uint blocks = GPU.multiProcessorCount * 2;
uint threads = ThreadsPerBlock;
#else
uint blocks = 256;
uint threads = 256;
#endif // AutoThreads
cout << "Specyfikacja GPU 0: " << endl;
cout << "Nazwa: \t" << GPU.name << endl;
cout << "Major: \t" << GPU.major << endl;
cout << "ALOKACJA:" << endl;
cout << "Sector num: \t" << BufNum << endl;
cout << "Mem: \t" << GPU.totalGlobalMem << "B, " << GPU.totalGlobalMem*0.000000001f << "GB" << endl;
cout << "Alloc: \t" << AllocSize * 4 << "B, " << (float)(AllocSize * 4 * 100) / GPU.totalGlobalMem << "%" << endl;
cout << "SectorSize: \t" << BuffAllocSize * 4 << endl;
cout << "Sector: \t" << BuffSize << endl;
////#####################################################////
//// MAIN ALGORITHM ////
////#####################################################////
for (uint x = 0; x < BufNum; x++)
{
memoryset << < blocks, threads, 0, Streams[x] >> > ((gpu_a + x * BuffAllocSize), num, BuffAllocSize);
}
//Helper variables
big i = 2, // iterator
y = 0, // start
x = 0,
z = 0,
ii = 0, // square of i
bii = 0, // (i-1)^2
size = 0, // number of bytes to copy
adress = 0; // address at which copying is to start
for (; i <= sqrtNum; i++)//wykonaj na i od 2 do sqrtNum
{
if (Pos(a, i)) { //Jezeli A[i] == true:
y = 0;
ii = i * i;
x = ii / BuffSize;
CutMultiples << < blocks, threads, 0, Streams[x] >> > (gpu_a + (ii / IntSize), ii%BuffSize, i, BuffSize);
x++;
z = (uint)ceil((float)(BuffSize - i) / i)*i;
//Wytnij wielokrotnosci i
for (int* gpu_a_dev = gpu_a + (BuffSize) / IntSize; x < BufNum; x++)
{
y = z + i;
CutMultiples << < blocks, threads, 0, Streams[x] >> > (gpu_a_dev, y%BuffSize, i, BuffSize);
hipDeviceSynchronize();
hipMemcpy(a, gpu_a, AllocSize * sizeof(int), hipMemcpyDeviceToHost);
z = (uint)ceil((float)(y - i) / i)*i;
gpu_a_dev += (BuffSize) / IntSize;
}
//Oblicz adres od ktorego ma zaczac sie kopiowanie i ilosc bajtow do skopiowania
size = ii / 32 - bii / 32 + 1;
adress = bii / 32;
bii = ii;
hipDeviceSynchronize();
//Kopiowanie
hipMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), hipMemcpyDeviceToHost);
#ifdef DEBUG
Error = hipGetLastError();
if (Error != hipSuccess) {
GetError(Error);
free(a);
hipFree(gpu_a);
return -1;
}
int* currentadress = &a[adress];
int* endadress = &a[AllocSize - 1];
hipMemcpy(a, gpu_a, AllocSize * sizeof(int), hipMemcpyDeviceToHost);
#endif // DEBUG
}
}
//Ostatnia iteracja nie kopjuje wiec skopjuj od (i-1)^2 do konca tabeli
i -= 1;
ii = i * i;
size = ii / 32 - bii / 32 + 1;
i -= 1;
bii = i * i;
adress = AllocSize - size;
hipMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), hipMemcpyDeviceToHost);
//Zmierz czas
auto te = high_resolution_clock::now();
auto GPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tablicy dynamicznej
Array.clear();
uint counter = 0;
for (big i = 2; i < num; i++)
{
if (Pos(a, i)) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = Settings.num - 1; i > 0; i--)
{
if (Pos(a, i)) {
Array.push_back(i);
break;
}
}
}
free(a);
hipFree(gpu_a);
return GPU_Time.count();
}//NOT WORKING
unsigned big Sito_GPU(const unsigned big num, vector<uint>&Array, Modes Settings) {
auto ts = high_resolution_clock::now(); //chrono - mierzenie czasu
const uint sqrtNum = ceil(sqrt(num));
hipError_t Error; //Wylapywanie bledow CUDA'Y
hipStream_t Stream; //Bajer do optymalizacji
const big AllocSize = (num) / IntSize + 1; //Rozmiar Alokacji
hipDeviceProp_t GPU; //Wlasciwosci GPU
//Init
hipGetDeviceProperties(&GPU, 0);
hipStreamCreate(&Stream);
//Alloc
int *a = new int[AllocSize];
int *gpu_a;
Error = hipMalloc((void**)&gpu_a, AllocSize * sizeof(int));
if (Error != hipSuccess) {
cout << "Za malo VRAMU" << endl;
free(a);
hipFree(gpu_a);
return false;
}
//Wypelnianie tabeli FFFFFFFFFFFFF
for (big i = 0; i < AllocSize; i++)
{
a[i] = -1;
}
//Przygotowywanie siatki
#ifdef AutoThreads
uint blocks = GPU.multiProcessorCount * 2;
uint threads = ThreadsPerBlock;
#else
uint blocks = 256;
uint threads = 256;
#endif // AutoThreads
cout << "Specyfikacja GPU 0: " << endl;
cout << "Nazwa: \t" << GPU.name << endl;
cout << "Major: \t" << GPU.major << endl;
cout << "Mem: \t" << GPU.totalGlobalMem << "B, " << GPU.totalGlobalMem*0.000000001f << "GB" << endl;
cout << "Alloc: \t" << AllocSize * 4 << "B, " << (float)(AllocSize * 4 * 100) / GPU.totalGlobalMem << "%" << endl;
////#####################################################////
//// MAIN ALGORITHM ////
////#####################################################////
memoryset << < blocks, threads >> > (gpu_a, num, AllocSize);
//Helper variables
big i = 2, // iterator
ii = 0, // square of i
bii = 0, // (i-1)^2
size = 0, // number of bytes to copy
adress = 0; // address at which copying is to start
for (; i <= sqrtNum; i++)//wykonaj na i od 2 do sqrtNum
{
if (Pos(a, i)) { //Jezeli A[i] == true:
ii = i * i;
//Wytnij wielokrotnosci i
CutMultiples << < blocks, threads, 0, Stream >> > (gpu_a, ii, i, num);
//Oblicz adres od ktorego ma zaczac sie kopiowanie i ilosc bajtow do skopiowania
size = ii / 32 - bii / 32 + 1;
adress = bii / 32;
bii = ii;
#ifdef DEBUG
Error = hipGetLastError();
if (Error != hipSuccess) {
GetError(Error);
free(a);
hipFree(gpu_a);
return -1;
}
int* currentadress = &a[adress];
int* endadress = &a[AllocSize - 1];
#endif // DEBUG
//Kopiowanie
hipMemcpyAsync(&a[adress], &gpu_a[adress], size * sizeof(int), hipMemcpyDeviceToHost, Stream);
}
}
//Ostatnia iteracja nie kopjuje wiec skopjuj od (i-1)^2 do konca tabeli
i -= 1;
ii = i * i;
size = ii / 32 - bii / 32 + 1;
i -= 1;
bii = i * i;
adress = AllocSize - size;
hipMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), hipMemcpyDeviceToHost);
//Zmierz czas
auto te = high_resolution_clock::now();
auto GPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tablicy dynamicznej
Array.clear();
uint counter = 0;
for (big i = 2; i < num; i++)
{
if (Pos(a, i)) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = Settings.num - 1; i > 0; i--)
{
if (Pos(a, i)) {
Array.push_back(i);
break;
}
}
}
free(a);
hipFree(gpu_a);
return GPU_Time.count();
}
unsigned big Sito_CPU(const big num, vector<uint>&Array, Modes Settings) {
auto ts = high_resolution_clock::now();
//Tablica boolowska
BinaryArry2d binArray(num + 1);
//Wybierz algorytm
switch (Settings.Algoritm)
{
case 1:
SieveOfEratostenes(num, binArray);
break;
case 2:
SieveOfAtkin(num, binArray);
break;
case 3:
SieveOfSundaram(num, binArray);
break;
default:
break;
}
//Zmierz czas
auto te = high_resolution_clock::now();
auto CPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tabeli dynamicznej
Array.clear();
uint counter = 0;
if (Settings.Algoritm != 3) {
for (uint i = 2; i < num; i++)
{
if (binArray[i]) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (int i = Settings.num; i > 0; i--)
{
if (binArray[i]) {
Array.push_back(i);
break;
}
}
}
}
else {
Array.push_back(2);
for (uint i = 1; i < (num - 2) / 2; i++)
{
if (!binArray[i]) {
Array.push_back(2 * i + 1);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = (num - 2) / 2; i > 0; i--)
{
if (!binArray[i]) {
Array.push_back(2 * i + 1);
break;
}
}
}
}
return (big)CPU_Time.count();
}
Modes Load() {
cout << "******************************************" << endl;
cout << "*********SUPER SPEED SIEVE ON GPU*********" << endl;
cout << "*************M. Zlotorowicz***************" << endl;
cout << "******************************************" << endl;
cout << endl;
#ifndef Default
cout << "Ktorego urzadzenia chcesz uzyc?" << endl;
cout << "1. GPU" << endl;
cout << "2. CPU" << endl;
cout << "3. GPU i CPU" << endl;
Modes settings;
int dev;
do {
cin >> dev;
} while (dev > 3);
switch (dev)
{
case 1:
settings.GPU = true;
break;
case 2:
settings.CPU = true;
break;
case 3:
settings.CPU = true;
settings.GPU = true;
break;
default:
break;
}
if (settings.CPU) {
cout << "Jakiego Sita chcesz uzyc (CPU)?" << endl;
cout << "1. Erastotenesa" << endl;
cout << "2. Atkina" << endl;
cout << "3. Sundarama" << endl;
do {
cin >> settings.Algoritm;
} while (settings.Algoritm > 3);
}
else {
settings.Algoritm = -1;
}
string NUM;
cout << "Podaj zakres do sprawdzenia(MAX: 2147483648 (31T):" << endl;
cout << "LiczbaGB - Tyle liczb by zmiescily sie w podanej liczbe GB" << endl;
cout << "LiczbaZ - 1 i Z zer" << endl;
cout << "LiczbaT - 2^T potegi" << endl;
cin >> NUM;
size_t at = NUM.find("GB", 0);
if (at != string::npos) {
settings.num = stoi(NUM.substr(0, NUM.size() - 2)) * 8000000000;
}
else {
size_t at = NUM.find("Z", 0);
if (at != string::npos) {
settings.num = 1 * pow(10, stoi(NUM.substr(0, NUM.size() - 1)));
}
else {
size_t at = NUM.find("T", 0);
if (at != string::npos) {
settings.num = pow(2, stoi(NUM.substr(0, NUM.size() - 1)));
}
else {
settings.num = stoll(NUM);
}
}
}
cout << settings.num << " Liczb" << endl;
if (settings.num > 60000000000 && settings.CPU) {
cout << "UWAGA! dla tak duzego n CPU moze liczyc naprawde dlugi czas..." << endl;
}
cout << "Podaj ilosc Elementow do wyswietlenia" << endl;
cin >> settings.toShow;
cout << "Czy chcesz zobaczyc Ostatnia liczbe pierwsza? (t/n)" << endl;
char t;
cin >> t;
if (t == 't') {
settings.ShowLast = true;
}
else {
settings.ShowLast = false;
}
cout << "Wyswietlic w rzedach? (t/n)" << endl;
cin >> t;
if (t == 't') {
settings.ShowinRows = true;
}
else {
settings.ShowinRows = false;
}
#else //Default
Modes settings;
settings.Algoritm = 2;
settings.CPU = true;
settings.GPU = true;
settings.num = DefaultNum;
settings.ShowLast = true;
settings.toShow = DefaultToShow;
settings.ShowinRows = true;
#endif
return settings;
}
void Show(vector<uint>primesGPU, vector<uint>primesCPU, Modes settings, big CPU_time, big GPU_time) {
if (settings.GPU) {
cout << "GPU";
}
if (settings.CPU) {
cout << "\t" << "CPU";
}
cout << endl;
if (settings.GPU && settings.CPU) {
cout << "GPU JEST SZYBSZE OD CPU " << (float)CPU_time / GPU_time << " razy. " << endl;
}
if (settings.GPU) {
cout << GPU_time << "ms";
}
if (settings.CPU) {
cout << "\t" << CPU_time << "ms";
}
cout << endl;
if (!settings.ShowinRows) {
if (settings.GPU) {
cout << "GPU: [";
for (auto a : primesGPU) {
cout << a << ", ";
}
cout << "]";
}
if (settings.CPU) {
cout << "CPU: [";
for (auto a : primesCPU) {
cout << a << ", ";
}
cout << "]";
}
}
else {
auto i = primesGPU.begin(), j = primesCPU.begin();
for (;;)
{
if (settings.GPU) {
cout << *i;
}
if (settings.CPU) {
cout << "\t" << *j;
}
if (i != primesGPU.end()) {
i++;
}
if (j != primesCPU.end()) {
j++;
}
if (i == primesGPU.end() || j == primesCPU.end()) {
break;
}
cout << endl;
}
}
cout << endl;
}
int main(void)
{
vector<uint>primesGPU;
vector<uint>primesCPU;
big CPU_time = -1, GPU_time = -1;
Modes settings = Load();
cout << endl;
cout << endl;
if (settings.GPU) {
cout << "GPU Rozpoczyna Prace" << endl;
GPU_time = Sito_GPU(settings.num, primesGPU, settings);
cout << "GPU Zakonczylo Prace" << endl;
}
if (settings.CPU) {
cout << "CPU Rozpoczyna Prace" << endl;
CPU_time = Sito_CPU(settings.num, primesCPU, settings);
cout << "CPU Zakonczylo Prace" << endl;
}
Show(primesGPU, primesCPU, settings, CPU_time, GPU_time);
system("pause");
return 0;
} | 38b8d85725e8e97b4367f61c15679e875a83214e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <conio.h>
#include <iostream>
#include <vector>
#include <chrono>
#include <math.h>
#include <string>
/**********************************************************************************
* Super szybkie równoległe sito erastotelesa v2.0
* Aby skompilować progra trzeba posiadać specjalny kompilator invidii
* Aby uruchomic wspolbierzna wersje algorytmu należy miec karte graficzna invidii
* Opis algorytmu zamieszczony na onedrive
* Algorytm jest w wersji 1.0 (glowne ograniczenie maksymalne N to ok. 1mld
*///*******************************************************************************
#define AutoThreads
//#define Default
//#define DEBUG
#define DefaultSectorNum 16
#define ThreadsPerBlock 512
#define DefaultNum (1<<10)
#define DefaultToShow 1000
#define big unsigned long long int
#define uint unsigned int
const uint IntSize = (sizeof(int) * 8);
using namespace std;
using namespace std::chrono;
void GetError(cudaError Err) {
cout << "WYSTAPIL BLAD: " << cudaGetErrorString(Err) << endl;
}
//funkcje GPU ulatwiajace operacje na bitach (dostep do pos'tego bitu w tabeli a)
__device__ inline bool pos(int* a, uint pos) {
return (a[pos / 32] >> (pos % 32)) & 1;
}
//funkcje GPU ulatwiajace operacje na bitach (ustawienie pos'tego bitu w tabeli a)
__device__ inline void set(int* a, uint pos, bool val = true) {
if (val) { //sets the pos-th bit of array a to the value val
atomicOr(&a[pos / 32], (1 << (pos % 32)));
}
else {
atomicAnd(&a[pos / 32], ~((int)(1 << (pos % 32))));
}
}
//Te same funcje ale dla CPU
inline bool Pos(int* a, big pos) {
return (a[pos / IntSize] >> (pos % IntSize)) & 1;
}
inline void Set(int* a, big pos, bool val) {
if (val) {
a[pos / IntSize] |= (1 << (pos % IntSize));
}
else {
a[pos / IntSize] &= ~((char)(1 << (pos % IntSize)));
}
}
struct Modes {
big Algoritm;
big num;
bool GPU = false;
bool CPU = false;
big toShow;
bool ShowLast;
bool ShowinRows;
};
//IMPLEMENTACJE ALGORYTMÓW DLA CPU + zoptymalizowana tablica boolowska
class BinaryArry2d
{
char* Data;
big AllocSize;
big Size;
public:
BinaryArry2d(const big sizeX) {
big rX = (sizeX - 1) / 8 + 1;
Data = new char[rX];
AllocSize = rX;
Size = sizeX;
}
void Fill(bool fill) {
if (fill) {
for (int i = 0; i < AllocSize; i++) {
Data[i] = 255;
}
}
else {
for (int i = 0; i < AllocSize; i++) {
Data[i] = 0;
}
}
}
BinaryArry2d& operator ^= (big x) {
Data[x / 8] ^= (1 << (x % 8));
return *this;
}
bool operator[](big x) {
return (Data[x / 8] >> (x % 8)) & 1;
}
void set(big x, bool type) {
if (type) {
Data[x / 8] |= (1 << (x % 8));
}
else {
Data[x / 8] &= ~((char)(1 << (x % 8)));
}
}
uint size() {
return Size;
}
uint allocSize() {
return AllocSize;
}
~BinaryArry2d() {
delete[] Data;
}
};
void SieveOfAtkin(big num, BinaryArry2d &Array) {
Array.Fill(false);
Array.set(2, true);
Array.set(3, true);
Array.set(5, true);
big sqrtnum = sqrt(num + 1);
big n, mod;
for (big x = 1; x <= sqrtnum; x++)
{
for (big y = 1; y <= sqrtnum; y += 2)
{
n = 4 * x*x + y * y;
if (n <= num) {
mod = n % 60;
if (mod == 1 || mod == 13 || mod == 17 || mod == 29 || mod == 37 || mod == 41 || mod == 49 || mod == 53) {
Array ^= n;
}
}
}
}
for (big x = 1; x <= sqrtnum; x += 2)
{
for (big y = 2; y <= sqrtnum; y += 2)
{
n = 3 * x*x + y * y;
if (n <= num) {
mod = n % 60;
if (mod == 7 || mod == 19 || mod == 31 || mod == 43) {
Array ^= n;
}
}
}
}
for (big x = 2; x <= sqrtnum; x++)
{
for (big y = 1; y <= x - 1; y++)
{
n = 3 * x*x - y * y;
if (n <= num) {
mod = n % 60;
if (mod == 11 || mod == 23 || mod == 47 || mod == 59) {
Array ^= n;
}
}
}
}
for (big i = 5; i <= sqrtnum; i++)
{
if (Array[i])
{
for (big j = i * i; j <= num; j += i)
{
Array.set(j, false);
}
}
}
}
void SieveOfSundaram(big n, BinaryArry2d & Array)
{
Array.Fill(false);
const uint nNew = (n - 2) / 2;
uint pos = 0;
for (uint i = 1; i <= nNew; i++) {
for (uint j = i; (i + j + 2 * i*j) <= nNew; j++) {
pos = i + j + 2 * i*j;
Array.set(pos, true);
}
}
}
void SieveOfEratostenes(big n, BinaryArry2d & Array) {
uint sqrtn = sqrt(n);
Array.Fill(true);
for (uint i = 2; i <= sqrtn; i++) {
if (Array[i]) {
for (int j = i * i; j <= n; j += i) {
Array.set(j, false);
}
}
}
}
//#############################################################
//# #
//# Funkcje GPU #
//# #
//#############################################################
//USTAW TABELE a NA SAME FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF...
__global__ void memoryset(int *a, big num, const big AllocSize)
{
uint tid = blockDim.x*blockIdx.x + threadIdx.x;
const uint move = blockDim.x * gridDim.x;
while (tid < AllocSize) {
a[tid] = -1;
tid += move;
}
}
//WSPÓŁBIERZNIE WYTNIJ WSZYSTKIE WIELOKROTNOSCI x Z TABELI a
__global__ void CutMultiples(int *a, const uint xx, const uint x, const uint Num)
{
uint tid = threadIdx.x + blockIdx.x*blockDim.x;
const uint move = blockDim.x * gridDim.x;
uint Pos = xx + x * tid;
while (Pos <= Num) {
set(a, Pos, false);
tid += move;
Pos = xx + x * tid;
}
}
unsigned big Sito_GPUv2(const unsigned big num, vector<uint>&Array, Modes Settings) {
big BufNum;
#ifndef DefaultSectorNum
for (uint i = 0; i < 16; i++)
{
BufNum = 1 << i;
if (num / BufNum <= 1073741824) {
break;
}
}
#else
BufNum = DefaultSectorNum;
#endif // DefaultSectorSize
auto ts = high_resolution_clock::now(); //chrono - mierzenie czasu
const uint sqrtNum = ceil(sqrt(num));
cudaError Error; //Wylapywanie bledow CUDA'Y
cudaStream_t* Streams = new cudaStream_t[BufNum];
const big AllocSize = (num) / IntSize + 1; //ROZMIAR ALOKACJI NA CPU
const big BuffAllocSize = (num / BufNum) / IntSize + 1;//ROZMIAR ALOKACJI SEKTORA
const big BuffSize = (num / BufNum);//ROZMIAR 1 SEKTORA
cudaDeviceProp GPU; //Wlasciwosci GPU
//Init
cudaGetDeviceProperties(&GPU, 0);
for (int i = 0; i < BufNum; i++)
{
cudaStreamCreate(&(Streams[i]));
}
//Alloc
int *a = new int[AllocSize];
int *gpu_a;
Error = cudaMalloc((void**)&gpu_a, AllocSize * sizeof(int));
if (Error != cudaSuccess) {
cout << "Za malo VRAMU" << endl;
free(a);
cudaFree(gpu_a);
return false;
}
//Wypelnianie tabeli FFFFFFFFFFFFF
for (big i = 0; i < AllocSize; i++)
{
a[i] = -1;
}
//Przygotowywanie siatki
#ifdef AutoThreads
uint blocks = GPU.multiProcessorCount * 2;
uint threads = ThreadsPerBlock;
#else
uint blocks = 256;
uint threads = 256;
#endif // AutoThreads
cout << "Specyfikacja GPU 0: " << endl;
cout << "Nazwa: \t" << GPU.name << endl;
cout << "Major: \t" << GPU.major << endl;
cout << "ALOKACJA:" << endl;
cout << "Sector num: \t" << BufNum << endl;
cout << "Mem: \t" << GPU.totalGlobalMem << "B, " << GPU.totalGlobalMem*0.000000001f << "GB" << endl;
cout << "Alloc: \t" << AllocSize * 4 << "B, " << (float)(AllocSize * 4 * 100) / GPU.totalGlobalMem << "%" << endl;
cout << "SectorSize: \t" << BuffAllocSize * 4 << endl;
cout << "Sector: \t" << BuffSize << endl;
////#####################################################////
//// GŁÓWNY ALGORYTM ////
////#####################################################////
for (uint x = 0; x < BufNum; x++)
{
memoryset << < blocks, threads, 0, Streams[x] >> > ((gpu_a + x * BuffAllocSize), num, BuffAllocSize);
}
//Zmienne pomocnicze
big i = 2, // iterator
y = 0, // Start
x = 0,
z = 0,
ii = 0, // kwadrat i
bii = 0, // (i-1)^2
size = 0, // ilosc bajtów do skopiowania
adress = 0; // adres od którego ma zaczac sie kopiowanie
for (; i <= sqrtNum; i++)//wykonaj na i od 2 do sqrtNum
{
if (Pos(a, i)) { //Jezeli A[i] == true:
y = 0;
ii = i * i;
x = ii / BuffSize;
CutMultiples << < blocks, threads, 0, Streams[x] >> > (gpu_a + (ii / IntSize), ii%BuffSize, i, BuffSize);
x++;
z = (uint)ceil((float)(BuffSize - i) / i)*i;
//Wytnij wielokrotnosci i
for (int* gpu_a_dev = gpu_a + (BuffSize) / IntSize; x < BufNum; x++)
{
y = z + i;
CutMultiples << < blocks, threads, 0, Streams[x] >> > (gpu_a_dev, y%BuffSize, i, BuffSize);
cudaDeviceSynchronize();
cudaMemcpy(a, gpu_a, AllocSize * sizeof(int), cudaMemcpyDeviceToHost);
z = (uint)ceil((float)(y - i) / i)*i;
gpu_a_dev += (BuffSize) / IntSize;
}
//Oblicz adres od ktorego ma zaczac sie kopiowanie i ilosc bajtow do skopiowania
size = ii / 32 - bii / 32 + 1;
adress = bii / 32;
bii = ii;
cudaDeviceSynchronize();
//Kopiowanie
cudaMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), cudaMemcpyDeviceToHost);
#ifdef DEBUG
Error = cudaGetLastError();
if (Error != cudaSuccess) {
GetError(Error);
free(a);
cudaFree(gpu_a);
return -1;
}
int* currentadress = &a[adress];
int* endadress = &a[AllocSize - 1];
cudaMemcpy(a, gpu_a, AllocSize * sizeof(int), cudaMemcpyDeviceToHost);
#endif // DEBUG
}
}
//Ostatnia iteracja nie kopjuje wiec skopjuj od (i-1)^2 do konca tabeli
i -= 1;
ii = i * i;
size = ii / 32 - bii / 32 + 1;
i -= 1;
bii = i * i;
adress = AllocSize - size;
cudaMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), cudaMemcpyDeviceToHost);
//Zmierz czas
auto te = high_resolution_clock::now();
auto GPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tablicy dynamicznej
Array.clear();
uint counter = 0;
for (big i = 2; i < num; i++)
{
if (Pos(a, i)) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = Settings.num - 1; i > 0; i--)
{
if (Pos(a, i)) {
Array.push_back(i);
break;
}
}
}
free(a);
cudaFree(gpu_a);
return GPU_Time.count();
}//NIEDZIALA
unsigned big Sito_GPU(const unsigned big num, vector<uint>&Array, Modes Settings) {
auto ts = high_resolution_clock::now(); //chrono - mierzenie czasu
const uint sqrtNum = ceil(sqrt(num));
cudaError Error; //Wylapywanie bledow CUDA'Y
cudaStream_t Stream; //Bajer do optymalizacji
const big AllocSize = (num) / IntSize + 1; //Rozmiar Alokacji
cudaDeviceProp GPU; //Wlasciwosci GPU
//Init
cudaGetDeviceProperties(&GPU, 0);
cudaStreamCreate(&Stream);
//Alloc
int *a = new int[AllocSize];
int *gpu_a;
Error = cudaMalloc((void**)&gpu_a, AllocSize * sizeof(int));
if (Error != cudaSuccess) {
cout << "Za malo VRAMU" << endl;
free(a);
cudaFree(gpu_a);
return false;
}
//Wypelnianie tabeli FFFFFFFFFFFFF
for (big i = 0; i < AllocSize; i++)
{
a[i] = -1;
}
//Przygotowywanie siatki
#ifdef AutoThreads
uint blocks = GPU.multiProcessorCount * 2;
uint threads = ThreadsPerBlock;
#else
uint blocks = 256;
uint threads = 256;
#endif // AutoThreads
cout << "Specyfikacja GPU 0: " << endl;
cout << "Nazwa: \t" << GPU.name << endl;
cout << "Major: \t" << GPU.major << endl;
cout << "Mem: \t" << GPU.totalGlobalMem << "B, " << GPU.totalGlobalMem*0.000000001f << "GB" << endl;
cout << "Alloc: \t" << AllocSize * 4 << "B, " << (float)(AllocSize * 4 * 100) / GPU.totalGlobalMem << "%" << endl;
////#####################################################////
//// GŁÓWNY ALGORYTM ////
////#####################################################////
memoryset << < blocks, threads >> > (gpu_a, num, AllocSize);
//Zmienne pomocnicze
big i = 2, // iterator
ii = 0, // kwadrat i
bii = 0, // (i-1)^2
size = 0, // ilosc bajtów do skopiowania
adress = 0; // adres od którego ma zaczac sie kopiowanie
for (; i <= sqrtNum; i++)//wykonaj na i od 2 do sqrtNum
{
if (Pos(a, i)) { //Jezeli A[i] == true:
ii = i * i;
//Wytnij wielokrotnosci i
CutMultiples << < blocks, threads, 0, Stream >> > (gpu_a, ii, i, num);
//Oblicz adres od ktorego ma zaczac sie kopiowanie i ilosc bajtow do skopiowania
size = ii / 32 - bii / 32 + 1;
adress = bii / 32;
bii = ii;
#ifdef DEBUG
Error = cudaGetLastError();
if (Error != cudaSuccess) {
GetError(Error);
free(a);
cudaFree(gpu_a);
return -1;
}
int* currentadress = &a[adress];
int* endadress = &a[AllocSize - 1];
#endif // DEBUG
//Kopiowanie
cudaMemcpyAsync(&a[adress], &gpu_a[adress], size * sizeof(int), cudaMemcpyDeviceToHost, Stream);
}
}
//Ostatnia iteracja nie kopjuje wiec skopjuj od (i-1)^2 do konca tabeli
i -= 1;
ii = i * i;
size = ii / 32 - bii / 32 + 1;
i -= 1;
bii = i * i;
adress = AllocSize - size;
cudaMemcpy(&a[adress], &gpu_a[adress], size * sizeof(int), cudaMemcpyDeviceToHost);
//Zmierz czas
auto te = high_resolution_clock::now();
auto GPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tablicy dynamicznej
Array.clear();
uint counter = 0;
for (big i = 2; i < num; i++)
{
if (Pos(a, i)) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = Settings.num - 1; i > 0; i--)
{
if (Pos(a, i)) {
Array.push_back(i);
break;
}
}
}
free(a);
cudaFree(gpu_a);
return GPU_Time.count();
}
unsigned big Sito_CPU(const big num, vector<uint>&Array, Modes Settings) {
auto ts = high_resolution_clock::now();
//Tablica boolowska
BinaryArry2d binArray(num + 1);
//Wybierz algorytm
switch (Settings.Algoritm)
{
case 1:
SieveOfEratostenes(num, binArray);
break;
case 2:
SieveOfAtkin(num, binArray);
break;
case 3:
SieveOfSundaram(num, binArray);
break;
default:
break;
}
//Zmierz czas
auto te = high_resolution_clock::now();
auto CPU_Time = duration_cast<milliseconds>(te - ts);
//Wpisz wyniki do tabeli dynamicznej
Array.clear();
uint counter = 0;
if (Settings.Algoritm != 3) {
for (uint i = 2; i < num; i++)
{
if (binArray[i]) {
Array.push_back(i);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (int i = Settings.num; i > 0; i--)
{
if (binArray[i]) {
Array.push_back(i);
break;
}
}
}
}
else {
Array.push_back(2);
for (uint i = 1; i < (num - 2) / 2; i++)
{
if (!binArray[i]) {
Array.push_back(2 * i + 1);
}
if (counter > Settings.toShow) {
break;
}
counter++;
}
if (Settings.ShowLast) {
for (big i = (num - 2) / 2; i > 0; i--)
{
if (!binArray[i]) {
Array.push_back(2 * i + 1);
break;
}
}
}
}
return (big)CPU_Time.count();
}
Modes Load() {
cout << "******************************************" << endl;
cout << "*********SUPER SPEED SIEVE ON GPU*********" << endl;
cout << "*************M. Zlotorowicz***************" << endl;
cout << "******************************************" << endl;
cout << endl;
#ifndef Default
cout << "Ktorego urzadzenia chcesz uzyc?" << endl;
cout << "1. GPU" << endl;
cout << "2. CPU" << endl;
cout << "3. GPU i CPU" << endl;
Modes settings;
int dev;
do {
cin >> dev;
} while (dev > 3);
switch (dev)
{
case 1:
settings.GPU = true;
break;
case 2:
settings.CPU = true;
break;
case 3:
settings.CPU = true;
settings.GPU = true;
break;
default:
break;
}
if (settings.CPU) {
cout << "Jakiego Sita chcesz uzyc (CPU)?" << endl;
cout << "1. Erastotenesa" << endl;
cout << "2. Atkina" << endl;
cout << "3. Sundarama" << endl;
do {
cin >> settings.Algoritm;
} while (settings.Algoritm > 3);
}
else {
settings.Algoritm = -1;
}
string NUM;
cout << "Podaj zakres do sprawdzenia(MAX: 2147483648 (31T):" << endl;
cout << "LiczbaGB - Tyle liczb by zmiescily sie w podanej liczbe GB" << endl;
cout << "LiczbaZ - 1 i Z zer" << endl;
cout << "LiczbaT - 2^T potegi" << endl;
cin >> NUM;
size_t at = NUM.find("GB", 0);
if (at != string::npos) {
settings.num = stoi(NUM.substr(0, NUM.size() - 2)) * 8000000000;
}
else {
size_t at = NUM.find("Z", 0);
if (at != string::npos) {
settings.num = 1 * pow(10, stoi(NUM.substr(0, NUM.size() - 1)));
}
else {
size_t at = NUM.find("T", 0);
if (at != string::npos) {
settings.num = pow(2, stoi(NUM.substr(0, NUM.size() - 1)));
}
else {
settings.num = stoll(NUM);
}
}
}
cout << settings.num << " Liczb" << endl;
if (settings.num > 60000000000 && settings.CPU) {
cout << "UWAGA! dla tak duzego n CPU moze liczyc naprawde dlugi czas..." << endl;
}
cout << "Podaj ilosc Elementow do wyswietlenia" << endl;
cin >> settings.toShow;
cout << "Czy chcesz zobaczyc Ostatnia liczbe pierwsza? (t/n)" << endl;
char t;
cin >> t;
if (t == 't') {
settings.ShowLast = true;
}
else {
settings.ShowLast = false;
}
cout << "Wyswietlic w rzedach? (t/n)" << endl;
cin >> t;
if (t == 't') {
settings.ShowinRows = true;
}
else {
settings.ShowinRows = false;
}
#else //Default
Modes settings;
settings.Algoritm = 2;
settings.CPU = true;
settings.GPU = true;
settings.num = DefaultNum;
settings.ShowLast = true;
settings.toShow = DefaultToShow;
settings.ShowinRows = true;
#endif
return settings;
}
void Show(vector<uint>primesGPU, vector<uint>primesCPU, Modes settings, big CPU_time, big GPU_time) {
if (settings.GPU) {
cout << "GPU";
}
if (settings.CPU) {
cout << "\t" << "CPU";
}
cout << endl;
if (settings.GPU && settings.CPU) {
cout << "GPU JEST SZYBSZE OD CPU " << (float)CPU_time / GPU_time << " razy. " << endl;
}
if (settings.GPU) {
cout << GPU_time << "ms";
}
if (settings.CPU) {
cout << "\t" << CPU_time << "ms";
}
cout << endl;
if (!settings.ShowinRows) {
if (settings.GPU) {
cout << "GPU: [";
for (auto a : primesGPU) {
cout << a << ", ";
}
cout << "]";
}
if (settings.CPU) {
cout << "CPU: [";
for (auto a : primesCPU) {
cout << a << ", ";
}
cout << "]";
}
}
else {
auto i = primesGPU.begin(), j = primesCPU.begin();
for (;;)
{
if (settings.GPU) {
cout << *i;
}
if (settings.CPU) {
cout << "\t" << *j;
}
if (i != primesGPU.end()) {
i++;
}
if (j != primesCPU.end()) {
j++;
}
if (i == primesGPU.end() || j == primesCPU.end()) {
break;
}
cout << endl;
}
}
cout << endl;
}
int main(void)
{
vector<uint>primesGPU;
vector<uint>primesCPU;
big CPU_time = -1, GPU_time = -1;
Modes settings = Load();
cout << endl;
cout << endl;
if (settings.GPU) {
cout << "GPU Rozpoczyna Prace" << endl;
GPU_time = Sito_GPU(settings.num, primesGPU, settings);
cout << "GPU Zakonczylo Prace" << endl;
}
if (settings.CPU) {
cout << "CPU Rozpoczyna Prace" << endl;
CPU_time = Sito_CPU(settings.num, primesCPU, settings);
cout << "CPU Zakonczylo Prace" << endl;
}
Show(primesGPU, primesCPU, settings, CPU_time, GPU_time);
system("pause");
return 0;
} |
eaa1a40c29ded09ca2f493c99cd4e1fe3f0e9c73.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "print_int.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int leng = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
print_int), dim3(gridBlock),dim3(threadBlock), 0, 0, x,leng);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
print_int), dim3(gridBlock),dim3(threadBlock), 0, 0, x,leng);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
print_int), dim3(gridBlock),dim3(threadBlock), 0, 0, x,leng);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eaa1a40c29ded09ca2f493c99cd4e1fe3f0e9c73.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "print_int.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int leng = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
print_int<<<gridBlock,threadBlock>>>(x,leng);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
print_int<<<gridBlock,threadBlock>>>(x,leng);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
print_int<<<gridBlock,threadBlock>>>(x,leng);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
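// Timing note (added sketch, not part of the original benchmark; t0, t1 and ms are placeholder
// names): steady_clock measures host wall time and the timed loop never synchronizes, so it
// largely measures launch overhead. A cudaEvent-based variant that times GPU execution instead:
/*
cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
	print_int<<<gridBlock,threadBlock>>>(x,leng);
}
cudaEventRecord(t1);
cudaEventSynchronize(t1);
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1); // elapsed GPU time in milliseconds
*/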
197266433bdafd13c4ab2a89717e82f85411b964.hip | // !!! This is a file automatically generated by hipify!!!
/*
Parallel programming, c_team
Assignment for October 29
Implement the matrix operation A*B+C using CUDA
+ use the cuBLAS library
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+( i )) // memory index
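// Worked example (added note): with ld = m = 6 rows, IDX2C(i,j,6) = j*6 + i, so element
// (row 2, column 1) of a lives at a[1*6 + 2] = a[8] -- columns are stored contiguously
// (column-major), which is the layout cuBLAS expects.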
#define m 6 // a is an m x k matrix
#define n 4 // b is a k x n matrix
#define k 5 // c is an m x n matrix
int main(void) {
hipError_t cudaStat; // hipMalloc status
hipblasStatus_t stat; // CUBLAS functions status
hipblasHandle_t handle; // CUBLAS context
int i, j; // i is the row index, j is the column index
// pointers to the matrices in host memory
float* a;
float* b;
float* c;
// allocate host memory for each matrix
a = (float*)malloc(m*k * sizeof(float)); // host memory for a
b = (float*)malloc(k*n * sizeof(float)); // host memory for b
c = (float*)malloc(m*n * sizeof(float)); // host memory for c
// the cublas matrix multiply is column major,
// so matrix a is stored column by column
int ind = 11; // value of row 1, column 1
for (j = 0; j < k; j++) {
for (i = 0; i < m; i++) {
a[IDX2C(i, j, m)] = (float)ind++;
}
}
// 11,17,23,29,35
// 12,18,24,30,36
// 13,19,25,31,37
// 14,20,26,32,38
// print matrix a
printf("a:\n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++)
{
printf("%5.0f", a[IDX2C(i, j, m)]);
}
printf("\n");
}
// matrix b
ind = 11; // value of row 1, column 1
for (j = 0; j < n; j++) {
for (i = 0; i < k; i++) {
b[IDX2C(i, j, k)] = (float)ind++;
}
}
//b:
//11,16,21,26
//12,17,22,27
//13,18,23,28
//14,19,24,29
//15,20,25,30
printf("b:\n");
for (i = 0; i < k; i++) {
for (j = 0; j < n; j++) {
printf("%5.0f", b[IDX2C(i, j, k)]);
}
printf("\n");
}
ind = 11; // value of row 1, column 1
for(j=0;j<n;j++){
for(i=0;i<m;i++){
c[IDX2C(i,j,m)]=(float)ind++;
}
}
//c:
//11,17,23,29
//12,18,24,30
//13,19,25,31
//14,20,26,32
//15,21,27,33
//16,22,28,34
printf("c:\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
printf("%5.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
// hand the data over to the gpu
float* d_a;
float* d_b;
float* d_c;
// allocate gpu memory the size of each matrix
cudaStat=hipMalloc((void**)&d_a,m*k*sizeof(*a));
cudaStat=hipMalloc((void**)&d_b,k*n*sizeof(*b));
cudaStat=hipMalloc((void**)&d_c,m*n*sizeof(*c));
stat = hipblasCreate(&handle); // initialize cublas
// copy the matrix values into gpu memory
stat = hipblasSetMatrix(m, k, sizeof(*a), a, m, d_a, m);//a -> d_a
stat = hipblasSetMatrix(k,n,sizeof(*b),b,k,d_b,k);//b -> d_b
stat = hipblasSetMatrix(m,n,sizeof(*c),c,m,d_c,m);//c -> d_c
// matrix -matrix multiplication: d_c = al*d_a*d_b + bet*d_c
// d_a -mxk matrix , d_b -kxn matrix , d_c -mxn matrix;
// the function computes C = al*A*B + C, so
// if C is the zero matrix the result is just A*B,
// and if C already holds values the result is A*B + C.
// al and bet are the scalar factors.
float al = 1.0f; // al=1
float bet=1.0f; //bet=1
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &al, d_a, m, d_b, k, &bet, d_c, m);
stat = hipblasGetMatrix(m, n, sizeof(*c), d_c, m, c, m); //cp d_c->c
printf("c after Sgemm :\n");
for(i=0;i<m;i++){
for(j=0;j<n;j++){
printf("%7.0f",c[IDX2C(i,j,m)]); //print c after Sgemm
}
printf("\n");
}
// free gpu memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipblasDestroy(handle); // destroy the cublas context
// free cpu memory
free(a);
free(b);
free(c);
return 0;
}
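// Parameter sketch for the Sgemm call above (added note, not from the original comments):
// hipblasSgemm(handle, opA, opB, m, n, k, &al, A, lda, B, ldb, &bet, C, ldc) computes
// C = al*op(A)*op(B) + bet*C, where op(A) is m x k, op(B) is k x n and C is m x n, all
// column-major; lda, ldb, ldc are the leading dimensions (here the row counts m, k and m).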
| 197266433bdafd13c4ab2a89717e82f85411b964.cu |
/*
병렬프로그래밍 c_team
10월 29일 과제
cuda를 이용하여 행렬 A*B+C 연산 구현
+cublas 라이브러리를 사용할 것
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+( i )) // 메모리 index
#define m 6 // a 는 mxk 행렬
#define n 4 // b 는 kxn 행렬
#define k 5 // c 는 mxn 행렬
int main(void) {
cudaError_t cudaStat; // cudaMalloc status
cublasStatus_t stat; // CUBLAS functions status
cublasHandle_t handle; // CUBLAS context
int i, j; // i는 행의 index j는 열의 index
// 행열 메모리 주소를 가리킬 포인터
float* a;
float* b;
float* c;
// 메모리를 행렬의 크기만큼 할당한다.
a = (float*)malloc(m*k * sizeof(float)); // host memory for a
b = (float*)malloc(k*n * sizeof(float)); // host memory for b
c = (float*)malloc(m*n * sizeof(float)); // host memory for c
// cublas 행렬곱 연산은 cloumn major이기 때문에
// 행렬 a를 열 기준으로 값을 저장한다.
int ind = 11; // 행렬의 1행 1열의 값
for (j = 0; j < k; j++) {
for (i = 0; i < m; i++) {
a[IDX2C(i, j, m)] = (float)ind++;
}
}
// 11,17,23,29,35
// 12,18,24,30,36
// 13,19,25,31,37
// 14,20,26,32,38
// a 행렬 출력
printf("a:\n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++)
{
printf("%5.0f", a[IDX2C(i, j, m)]);
}
printf("\n");
}
// b행렬
ind = 11; // 행렬 1행 1열의 값
for (j = 0; j < n; j++) {
for (i = 0; i < k; i++) {
b[IDX2C(i, j, k)] = (float)ind++;
}
}
//b:
//11,16,21,26
//12,17,22,27
//13,18,23,28
//14,19,24,29
//15,20,25,30
printf("b:\n");
for (i = 0; i < k; i++) {
for (j = 0; j < n; j++) {
printf("%5.0f", b[IDX2C(i, j, k)]);
}
printf("\n");
}
ind = 11; // 1행 1열의 값
for(j=0;j<n;j++){
for(i=0;i<m;i++){
c[IDX2C(i,j,m)]=(float)ind++;
}
}
//c:
//11,17,23,29
//12,18,24,30
//13,19,25,31
//14,20,26,32
//15,21,27,33
//16,22,28,34
printf("c:\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
printf("%5.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
// gpu로 값을 넘주는 작업
float* d_a;
float* d_b;
float* d_c;
// 행렬 크기만큼 gpu 메모리 할당
cudaStat=cudaMalloc((void**)&d_a,m*k*sizeof(*a));
cudaStat=cudaMalloc((void**)&d_b,k*n*sizeof(*b));
cudaStat=cudaMalloc((void**)&d_c,m*n*sizeof(*c));
stat = cublasCreate(&handle); // cublas 초기화
// 행렬의 값을 gpu메모리로 복사
stat = cublasSetMatrix(m, k, sizeof(*a), a, m, d_a, m);//a -> d_a
stat = cublasSetMatrix(k,n,sizeof(*b),b,k,d_b,k);//b -> d_b
stat = cublasSetMatrix(m,n,sizeof(*c),c,m,d_c,m);//c -> d_c
// matrix -matrix multiplication: d_c = al*d_a*d_b + bet*d_c
// d_a -mxk matrix , d_b -kxn matrix , d_c -mxn matrix;
// 사용하는 함수는 C=al*A*B+C 결과를 가진다
// 따라서 C가 0행렬이라면 A*B의 결과만
// C의 값이 존재한다면 A*B+C의 결과르 가진다.
// al,bet는 행렬의 scalar 이다.
float al = 1.0f; // al=1
float bet=1.0f; //bet=1
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &al, d_a, m, d_b, k, &bet, d_c, m);
stat = cublasGetMatrix(m, n, sizeof(*c), d_c, m, c, m); //cp d_c->c
printf("c after Sgemm :\n");
for(i=0;i<m;i++){
for(j=0;j<n;j++){
printf("%7.0f",c[IDX2C(i,j,m)]); //print c after Sgemm
}
printf("\n");
}
// gpu 메모리 헤재
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cublasDestroy(handle); //쿠다 명령어 삭제
// cpu 메모리 헤제
free(a);
free(b);
free(c);
return 0;
}
|
bb2132efb477d1d190b305c890d1011395f9e1f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
template<>
void Memcpy<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const void* src, size_t sz,
hipMemcpyKind kind) {
if (dst == src) { return; }
CudaCheck(hipMemcpyAsync(dst, src, sz, kind, ctx->cuda_stream()));
}
template<>
void Memset<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const char value, size_t sz) {
CudaCheck(hipMemsetAsync(dst, value, sz, ctx->cuda_stream()));
}
} // namespace oneflow
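// Usage sketch (an added, illustrative note; the ctx, dst, src and bytes names are placeholders):
// both helpers enqueue asynchronously on ctx->cuda_stream(), e.g.
//
//   Memcpy<DeviceType::kGPU>(ctx, dst, src, bytes, hipMemcpyDeviceToDevice);
//   Memset<DeviceType::kGPU>(ctx, dst, 0, bytes);
//
// so the caller is responsible for synchronizing the stream before reading the results.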
| bb2132efb477d1d190b305c890d1011395f9e1f9.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
template<>
void Memcpy<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const void* src, size_t sz,
cudaMemcpyKind kind) {
if (dst == src) { return; }
CudaCheck(cudaMemcpyAsync(dst, src, sz, kind, ctx->cuda_stream()));
}
template<>
void Memset<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const char value, size_t sz) {
CudaCheck(cudaMemsetAsync(dst, value, sz, ctx->cuda_stream()));
}
} // namespace oneflow
|
700378819e9372e7b9a96ace5ee81c6ac358117c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _NEURALNET_KERNEL_H_
#define _NEURALNET_KERNEL_H_
#include <stdio.h>
#include "neuralnet.h"
// TODO: place each data vector (row of input_d + bias term) within constant memory
// Neural net relu matrix multiplication kernel
// - height x width = HIDDEN_UNITS x FEATURES+1
// - input = 1 x width
// - weights = flattened matrix = height x width --> NEED TO TRANSPOSE
// - returns output = relu(input * transpose(weights)) = 1 x height
__global__ void ReluMulKernel(float *input, float *weights, float *output)
{
// TODO: input vector --> constant memory
// local const values
const int height = HIDDEN_UNITS;
const int width = FEATURES + 1;
// index into flattened weights matrix
int tx = threadIdx.x;
int i = blockDim.x * blockIdx.x + tx;
// index into the input vector
int row = i / width;
// index into the output vector
int col = i % width;
// local products vector (shared memory is not zero-initialized, so clear it before accumulating)
__shared__ float local_output[height];
if (tx < height) local_output[tx] = 0.0f;
__syncthreads(); // make sure the whole block sees the zeroed buffer
if ((i < height * width) && (row < height))
{
// local_output = input * transpose(weights)
atomicAdd(&local_output[row], input[col] * weights[i]);
__syncthreads(); // wait for everyone to add to local_output
if (tx < height)
{
// integrate the local products with global
atomicAdd(&output[tx], local_output[tx]);
}
__syncthreads(); // wait for local products -> global
// apply relu function
if ((i < height) && (output[i] < 0.0)) output[i] = 0.0; // TODO: try to reduce divergence here?
} // END if within weights
}
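// Launch sketch (added note; mirrors the call made in getAccuracy below): one thread per element
// of the flattened HIDDEN_UNITS x (FEATURES+1) weight matrix w.
//
//   int blocks = ((FEATURES+1)*HIDDEN_UNITS + BLOCK_SIZE - 1) / BLOCK_SIZE;
//   hipLaunchKernelGGL(ReluMulKernel, dim3(blocks), dim3(BLOCK_SIZE), 0, 0, data, w, z);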
// Neural net softmax matrix multiplication kernel
// - height x width = HIDDEN_UNITS+1 x NO_OF_CLASSES
// - input = 1 x (height-1)
// - weights = flattened matrix = height x width
// - returns output = softmax(input * weights) = 1 x width
__global__ void SoftmaxMulKernel(float *input, float *weights, float *output, int start)
{
// TODO: input vector --> constant memory
// local const values
const int height = HIDDEN_UNITS + 1;
const int width = NO_OF_CLASSES;
// index into flattened weights matrix
int i = blockDim.x * blockIdx.x + threadIdx.x;
// index into the input vector
int row = i / width;
// index into the output vector
int col = i % width;
// local products vector (cleared first; shared memory is not zero-initialized)
__shared__ float local_output[width];
if (threadIdx.x < width) local_output[threadIdx.x] = 0.0f;
__syncthreads(); // make sure the whole block sees the zeroed buffer
// if (i < width){ output[i] = 10.0;}
if ((i < height * width) && (row < height))
{
//atomicAdd(&output[col], input[row] * weights[i]);
if (row == 0)
{
// apply bias
atomicAdd(&local_output[col], BIAS * weights[i]);
}
else
{
// adjust index into input since input has one less element
atomicAdd(&local_output[col], input[row-1] * weights[i]);
}
__syncthreads(); // wait for everyone to add to local_output
if (threadIdx.x < width)
{
// integrate this block's local products with the global output row
// (indexed by threadIdx.x, as in ReluMulKernel, so every block updates the same NO_OF_CLASSES slots)
atomicAdd(&output[start + threadIdx.x], local_output[threadIdx.x]);
}
__syncthreads(); // wait for the integration before applying softmax
// apply softmax function
if (i < width)
{
__shared__ float sum;
// 1. store the exp() of each output value
__shared__ float exp_vector[width];
exp_vector[i] = expf(output[start + i]);
// 2. calculate the sum of all the exponent values
// --> width < BLOCK_SIZE, so this will only be in the first block
if (threadIdx.x == 0) sum = 0.0;
__syncthreads(); // wait for sum to be zeroed
atomicAdd(&sum, exp_vector[i]);
// 3. store new output value
output[start + i] = exp_vector[i] / sum;
} // END if within output
} // END if within weights
}
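// Launch sketch (added note; mirrors getAccuracy below): one thread per element of the flattened
// (HIDDEN_UNITS+1) x NO_OF_CLASSES weight matrix v; `start` = k*NO_OF_CLASSES selects the output
// row of y that belongs to training sample k.
//
//   int blocks = ((HIDDEN_UNITS+1)*NO_OF_CLASSES + BLOCK_SIZE - 1) / BLOCK_SIZE;
//   hipLaunchKernelGGL(SoftmaxMulKernel, dim3(blocks), dim3(BLOCK_SIZE), 0, 0, z, v, y, k*NO_OF_CLASSES);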
// output = delta1
__global__ void BackPropMulKernel1(float *A, float *z, float *output)
{
//delta v = transpose(z) * A
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < (HIDDEN_UNITS+1)) {
for(int k = 0; k < NO_OF_CLASSES; k++) {
if(i == 0)
output[i*NO_OF_CLASSES + k] = A[k];
else
output[i*NO_OF_CLASSES + k] = z[i-1] * A[k];
}
}
}
__global__ void BackPropMulKernel2(float *A, float *data, float *v, float *deltaw, float *w, float *z)
{
int h = threadIdx.x + blockDim.x * blockIdx.x;
if(h < HIDDEN_UNITS){
float check = 0;
for(int i = 0 ; i < (FEATURES+1) ; i++){
check += data[i]*w[h*(FEATURES+1)+i];
}
if(check > 0) {
float sum = 0;
for(int k = 0 ; k < NO_OF_CLASSES ; k++){
sum += A[k]*v[h*NO_OF_CLASSES + k];
}
float temp = eta*sum*z[h]*(1-z[h]);
for(int j = 0 ; j < (FEATURES+1) ; j++){
deltaw[h*(FEATURES+1) + j] = temp*data[j];
}
} else {
for(int j = 0 ; j < (FEATURES+1) ; j++){
deltaw[h*(FEATURES+1) + j] = 0;
}
}
}
}
// vectorAddition: matrix = matrix + deltaMatrix
// - matrix = flattened with length N
__global__ void vectorAddition(float *matrix, float *deltaMatrix, int N)
{
// index into the vectors
/*int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) matrix[i] += deltaMatrix[i];*/
// initial index using thread coarsening
int i = COURSENESS*(blockDim.x * blockIdx.x) + threadIdx.x;
int count = 0;
while (i < N && count < COURSENESS)
{
matrix[i] += deltaMatrix[i];
// move to next block
i += blockDim.x;
count++;
}
}
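// vectorSubtraction: output[0..N-1] = input[start..start+N-1] - delta[start..start+N-1]
// (used below to form the per-sample error A = onehotR - y for one row of the y matrix)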
__global__ void vectorSubtraction(float* input, float* output, float* delta, int N, int start)
{
// index into the vectors
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) output[i] = input[start + i] - delta[start + i];
}
void getAccuracy(float *input_d, float *onehotR_d, float *R_d, float *w, float *v)
{
//initialize deltaw
float *deltaw;
hipMalloc((void**)&deltaw, (FEATURES+1)*HIDDEN_UNITS*sizeof(float));
hipMemset(deltaw, 0, (FEATURES+1)*HIDDEN_UNITS*sizeof(float));
//initialize deltav
float *deltav;
hipMalloc((void**)&deltav, (HIDDEN_UNITS+1)*NO_OF_CLASSES*sizeof(float));
hipMemset(deltav, 0, (HIDDEN_UNITS+1)*NO_OF_CLASSES*sizeof(float));
//initialize z
float *z;
hipMalloc((void**)&z, HIDDEN_UNITS*sizeof(float));
hipMemset(z, 0, HIDDEN_UNITS*sizeof(float));
//initialize y
/*
TODO: change y to a matrix of size NUMBER_ELEMENTS*NO_OF_CLASSES
*/
float *y;
hipMalloc((void**)&y, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
hipMemset(y, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
float *A;
hipMalloc((void**)&A, NO_OF_CLASSES*sizeof(float));
hipMemset(A, 0, NO_OF_CLASSES*sizeof(float));
int i = 0;
while(i<EPOCH)
{
//iterate through the data
for (int k = 0; k<NUM_ELEMENTS; k++)
{
// point to portion of the input array (within device global memory)
float *data = &input_d[k*(FEATURES+1)];
int blocks = 0;
//kernel call for z = relu(x * w^T) --> data = x, w = flattened matrix
// divide w up into blocks -- one thread per matrix element
blocks = ((FEATURES+1)*HIDDEN_UNITS) / BLOCK_SIZE;
if (((FEATURES+1)*HIDDEN_UNITS) % BLOCK_SIZE != 0) blocks++;
hipLaunchKernelGGL(( ReluMulKernel), dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, data, w, z);
hipDeviceSynchronize();
//kernel call for y = softmax(z * v) --> v = flattened matrix
// divide v up into blocks -- one thread per matrix element
blocks = ((HIDDEN_UNITS+1)*NO_OF_CLASSES) / BLOCK_SIZE;
if (((HIDDEN_UNITS+1)*NO_OF_CLASSES) % BLOCK_SIZE != 0) blocks++;
hipLaunchKernelGGL(( SoftmaxMulKernel), dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, z, v, y, k*NO_OF_CLASSES);
hipDeviceSynchronize();
      //calculate A = (one hot R - one hot Y) for this sample via the vectorSubtraction kernel
/* for(int i = 0 ; i < NO_OF_CLASSES ; i++)
{
A[i] = onehotR_d[k * NO_OF_CLASSES + i] - y[k*NO_OF_CLASSES + i];
}
*/
// (input, output, delta, N, start)
hipLaunchKernelGGL(( vectorSubtraction), dim3(1),dim3(BLOCK_SIZE), 0, 0, onehotR_d, A, y, NO_OF_CLASSES, k*NO_OF_CLASSES);
hipDeviceSynchronize();
//kernel call for delta v
int kernel1_grid_size = (int)ceil((float)(HIDDEN_UNITS+1)/BLOCK_SIZE);
hipLaunchKernelGGL(( BackPropMulKernel1), dim3(kernel1_grid_size),dim3(BLOCK_SIZE), 0, 0, A, z, deltav);
hipDeviceSynchronize();
//kernel call for delta w
int kernel2_grid_size = (int)ceil((float)HIDDEN_UNITS/BLOCK_SIZE);
hipLaunchKernelGGL(( BackPropMulKernel2), dim3(kernel2_grid_size),dim3(BLOCK_SIZE), 0, 0, A, data, v, deltaw,w,z);
hipDeviceSynchronize();
//kernel call for updating v values
// using thread coarsening
blocks = ((HIDDEN_UNITS+1)*NO_OF_CLASSES) / (BLOCK_SIZE*COURSENESS);
if (((HIDDEN_UNITS+1)*NO_OF_CLASSES) % (BLOCK_SIZE*COURSENESS) != 0) blocks++;
hipLaunchKernelGGL(( vectorAddition) , dim3(blocks) , dim3(BLOCK_SIZE) , 0, 0, v, deltav, (HIDDEN_UNITS+1)*NO_OF_CLASSES);
hipDeviceSynchronize();
//kernel call for updating w values
// using thread coarsening
blocks = ((FEATURES+1)*HIDDEN_UNITS) / (BLOCK_SIZE*COURSENESS);
if (((FEATURES+1)*HIDDEN_UNITS) % (BLOCK_SIZE*COURSENESS) != 0) blocks++;
hipLaunchKernelGGL(( vectorAddition) , dim3(blocks) , dim3(BLOCK_SIZE) , 0, 0, w, deltaw, (FEATURES+1)*HIDDEN_UNITS);
hipDeviceSynchronize();
}
i++;
}
//calculate error rate
float error = calculateErrorRate(R_d, y);
}
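// calculateErrorRate: compares each label r[i] with the argmax of the corresponding row of y
// and returns the fraction of matches, i.e. the classification accuracy over NUM_ELEMENTS samples.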
float calculateErrorRate(float * r, float *y)
{
int count = 0;
for(int i = 0; i < NUM_ELEMENTS; i++){
if(r[i]==(float)predictY(&y[i * NO_OF_CLASSES])){
count++;
}
}
return (float)count / NUM_ELEMENTS;
}
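// predictY: returns the index of the largest of the NO_OF_CLASSES scores in y;
// e.g. for scores {0.1, 0.7, 0.2} it returns 1.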
int predictY(float *y){
int maxindex = 0;
float max = y[0];
for(int j = 1; j<NO_OF_CLASSES; j++){
if(y[j]>max){
max = y[j];
maxindex = j;
}
}
return maxindex;
}
void allocateDeviceArray(float **deviceArray, unsigned int size)
{
hipMalloc((void**)deviceArray, size*sizeof(float));
}
void copyDataHostToDevice(float *deviceArray, float *hostArray, unsigned int size)
{
hipMemcpy(deviceArray, hostArray , size*sizeof(float), hipMemcpyHostToDevice);
}
void copyDataDeviceToHost(float *deviceArray, float *hostArray, unsigned int size)
{
hipMemcpy(hostArray, deviceArray , size*sizeof(float), hipMemcpyDeviceToHost);
}
void freeMemory(float *deviceArray)
{
  hipFree(deviceArray);
}
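// Typical flow using the helpers above (a minimal illustrative sketch; the buffer names and the
// element count n are placeholders, not part of this file):
//   float *d_buf;
//   allocateDeviceArray(&d_buf, n);          // hipMalloc wrapper
//   copyDataHostToDevice(d_buf, h_buf, n);   // host -> device
//   /* launch kernels that read/write d_buf */
//   copyDataDeviceToHost(d_buf, h_buf, n);   // device -> host
//   freeMemory(d_buf);                       // hipFree wrapper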
#endif // #ifndef _NEURALNET_KERNEL_H_
| 700378819e9372e7b9a96ace5ee81c6ac358117c.cu | #ifndef _NEURALNET_KERNEL_H_
#define _NEURALNET_KERNEL_H_
#include <stdio.h>
#include "neuralnet.h"
// TODO: place each data vector (row of input_d + bias term) within constant memory
// Neural net relu matrix multiplication kernel
// - height x width = HIDDEN_UNITS x FEATURES+1
// - input = 1 x width
// - weights = flattened matrix = height x width --> NEED TO TRANSPOSE
// - returns output = relu(input * transpose(weights)) = 1 x height
__global__ void ReluMulKernel(float *input, float *weights, float *output)
{
// TODO: input vector --> constant memory
// local const values
const int height = HIDDEN_UNITS;
const int width = FEATURES + 1;
// index into flattened weights matrix
int tx = threadIdx.x;
int i = blockDim.x * blockIdx.x + tx;
// index into the input vector
int row = i / width;
// index into the output vector
int col = i % width;
// local products vector
  __shared__ float local_output[height];
  // zero the shared accumulator before any thread adds into it (shared memory is uninitialized)
  for (int j = threadIdx.x; j < height; j += blockDim.x) local_output[j] = 0.0f;
  __syncthreads(); // make sure the zeroed values are visible to all threads
if ((i < height * width) && (row < height))
{
// local_output = input * transpose(weights)
atomicAdd(&local_output[row], input[col] * weights[i]);
__syncthreads(); // wait for everyone to add to local_output
if (tx < height)
{
// integrate the local products with global
atomicAdd(&output[tx], local_output[tx]);
}
__syncthreads(); // wait for local products -> global
// apply relu function
if ((i < height) && (output[i] < 0.0)) output[i] = 0.0; // TODO: try to reduce divergence here?
} // END if within weights
}
// Neural net softmax matrix multiplication kernel
// - height x width = HIDDEN_UNITS+1 x NO_OF_CLASSES
// - input = 1 x (height-1)
// - weights = flattened matrix = height x width
// - returns output = softmax(input * weights) = 1 x width
__global__ void SoftmaxMulKernel(float *input, float *weights, float *output, int start)
{
// TODO: input vector --> constant memory
// local const values
const int height = HIDDEN_UNITS + 1;
const int width = NO_OF_CLASSES;
// index into flattened weights matrix
int i = blockDim.x * blockIdx.x + threadIdx.x;
// index into the input vector
int row = i / width;
// index into the output vector
int col = i % width;
// local products vector
  __shared__ float local_output[width];
  // zero the shared accumulator before any thread adds into it (shared memory is uninitialized)
  for (int j = threadIdx.x; j < width; j += blockDim.x) local_output[j] = 0.0f;
  __syncthreads(); // make sure the zeroed values are visible to all threads
// if (i < width){ output[i] = 10.0;}
if ((i < height * width) && (row < height))
{
//atomicAdd(&output[col], input[row] * weights[i]);
if (row == 0)
{
// apply bias
atomicAdd(&local_output[col], BIAS * weights[i]);
}
else
{
// adjust index into input since input has one less element
atomicAdd(&local_output[col], input[row-1] * weights[i]);
}
__syncthreads(); // wait for everyone to add to local_output
    if (threadIdx.x < width)
    {
      // integrate this block's local products with the global output
      atomicAdd(&output[start + threadIdx.x], local_output[threadIdx.x]);
    }
    __syncthreads(); // wait for local products -> global
// apply softmax function
if (i < width)
{
__shared__ float sum;
// 1. store the exp() of each output value
__shared__ float exp_vector[width];
exp_vector[i] = expf(output[start + i]);
// 2. calculate the sum of all the exponent values
// --> width < BLOCK_SIZE, so this will only be in the first block
if (threadIdx.x == 0) sum = 0.0;
__syncthreads(); // wait for sum to be zeroed
      atomicAdd(&sum, exp_vector[i]);
      __syncthreads(); // wait until every thread has added its exponent to sum
      // 3. store new output value
      output[start + i] = exp_vector[i] / sum;
} // END if within output
} // END if within weights
}
// BackPropMulKernel1: output = deltav = transpose([1, z]) * A
// - row 0 corresponds to the bias unit (its input is taken as 1)
// - row i > 0 scales z[i-1] by each of the NO_OF_CLASSES error terms in A
__global__ void BackPropMulKernel1(float *A, float *z, float *output)
{
//delta v = transpose(z) * A
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < (HIDDEN_UNITS+1)) {
for(int k = 0; k < NO_OF_CLASSES; k++) {
if(i == 0)
output[i*NO_OF_CLASSES + k] = A[k];
else
output[i*NO_OF_CLASSES + k] = z[i-1] * A[k];
}
}
}
__global__ void BackPropMulKernel2(float *A, float *data, float *v, float *deltaw, float *w, float *z)
{
int h = threadIdx.x + blockDim.x * blockIdx.x;
if(h < HIDDEN_UNITS){
float check = 0;
for(int i = 0 ; i < (FEATURES+1) ; i++){
check += data[i]*w[h*(FEATURES+1)+i];
}
if(check > 0) {
float sum = 0;
for(int k = 0 ; k < NO_OF_CLASSES ; k++){
sum += A[k]*v[h*NO_OF_CLASSES + k];
}
float temp = eta*sum*z[h]*(1-z[h]);
for(int j = 0 ; j < (FEATURES+1) ; j++){
deltaw[h*(FEATURES+1) + j] = temp*data[j];
}
} else {
for(int j = 0 ; j < (FEATURES+1) ; j++){
deltaw[h*(FEATURES+1) + j] = 0;
}
}
}
}
// vectorAddition: matrix = matrix + deltaMatrix
// - matrix = flattened with length N
__global__ void vectorAddition(float *matrix, float *deltaMatrix, int N)
{
// index into the vectors
/*int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) matrix[i] += deltaMatrix[i];*/
// initial index using thread coarsening
int i = COURSENESS*(blockDim.x * blockIdx.x) + threadIdx.x;
int count = 0;
while (i < N && count < COURSENESS)
{
matrix[i] += deltaMatrix[i];
// move to next block
i += blockDim.x;
count++;
}
}
__global__ void vectorSubtraction(float* input, float* output, float* delta, int N, int start)
{
// index into the vectors
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) output[i] = input[start + i] - delta[start + i];
}
void getAccuracy(float *input_d, float *onehotR_d, float *R_d, float *w, float *v)
{
//initialize deltaw
float *deltaw;
cudaMalloc((void**)&deltaw, (FEATURES+1)*HIDDEN_UNITS*sizeof(float));
cudaMemset(deltaw, 0, (FEATURES+1)*HIDDEN_UNITS*sizeof(float));
//initialize deltav
float *deltav;
cudaMalloc((void**)&deltav, (HIDDEN_UNITS+1)*NO_OF_CLASSES*sizeof(float));
cudaMemset(deltav, 0, (HIDDEN_UNITS+1)*NO_OF_CLASSES*sizeof(float));
//initialize z
float *z;
cudaMalloc((void**)&z, HIDDEN_UNITS*sizeof(float));
cudaMemset(z, 0, HIDDEN_UNITS*sizeof(float));
//initialize y
/*
TODO: change y to a matrix of size NUMBER_ELEMENTS*NO_OF_CLASSES
*/
float *y;
cudaMalloc((void**)&y, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
cudaMemset(y, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
float *A;
cudaMalloc((void**)&A, NO_OF_CLASSES*sizeof(float));
cudaMemset(A, 0, NO_OF_CLASSES*sizeof(float));
int i = 0;
while(i<EPOCH)
{
//iterate through the data
for (int k = 0; k<NUM_ELEMENTS; k++)
{
// point to portion of the input array (within device global memory)
float *data = &input_d[k*(FEATURES+1)];
int blocks = 0;
//kernel call for z = relu(x * w^T) --> data = x, w = flattened matrix
// divide w up into blocks -- one thread per matrix element
blocks = ((FEATURES+1)*HIDDEN_UNITS) / BLOCK_SIZE;
if (((FEATURES+1)*HIDDEN_UNITS) % BLOCK_SIZE != 0) blocks++;
ReluMulKernel<<< blocks, BLOCK_SIZE >>>(data, w, z);
cudaDeviceSynchronize();
//kernel call for y = softmax(z * v) --> v = flattened matrix
// divide v up into blocks -- one thread per matrix element
blocks = ((HIDDEN_UNITS+1)*NO_OF_CLASSES) / BLOCK_SIZE;
if (((HIDDEN_UNITS+1)*NO_OF_CLASSES) % BLOCK_SIZE != 0) blocks++;
SoftmaxMulKernel<<< blocks, BLOCK_SIZE >>>(z, v, y, k*NO_OF_CLASSES);
cudaDeviceSynchronize();
      //calculate A = (one hot R - one hot Y) for this sample via the vectorSubtraction kernel
/* for(int i = 0 ; i < NO_OF_CLASSES ; i++)
{
A[i] = onehotR_d[k * NO_OF_CLASSES + i] - y[k*NO_OF_CLASSES + i];
}
*/
// (input, output, delta, N, start)
vectorSubtraction<<<1,BLOCK_SIZE>>>(onehotR_d, A, y, NO_OF_CLASSES, k*NO_OF_CLASSES);
cudaDeviceSynchronize();
//kernel call for delta v
int kernel1_grid_size = (int)ceil((float)(HIDDEN_UNITS+1)/BLOCK_SIZE);
BackPropMulKernel1<<<kernel1_grid_size,BLOCK_SIZE>>>(A, z, deltav);
cudaDeviceSynchronize();
//kernel call for delta w
int kernel2_grid_size = (int)ceil((float)HIDDEN_UNITS/BLOCK_SIZE);
BackPropMulKernel2<<<kernel2_grid_size,BLOCK_SIZE>>>(A, data, v, deltaw,w,z);
cudaDeviceSynchronize();
//kernel call for updating v values
// using thread coarsening
blocks = ((HIDDEN_UNITS+1)*NO_OF_CLASSES) / (BLOCK_SIZE*COURSENESS);
if (((HIDDEN_UNITS+1)*NO_OF_CLASSES) % (BLOCK_SIZE*COURSENESS) != 0) blocks++;
vectorAddition <<< blocks , BLOCK_SIZE >>> (v, deltav, (HIDDEN_UNITS+1)*NO_OF_CLASSES);
cudaDeviceSynchronize();
//kernel call for updating w values
// using thread coarsening
blocks = ((FEATURES+1)*HIDDEN_UNITS) / (BLOCK_SIZE*COURSENESS);
if (((FEATURES+1)*HIDDEN_UNITS) % (BLOCK_SIZE*COURSENESS) != 0) blocks++;
vectorAddition <<< blocks , BLOCK_SIZE >>> (w, deltaw, (FEATURES+1)*HIDDEN_UNITS);
cudaDeviceSynchronize();
}
i++;
}
//calculate error rate
float error = calculateErrorRate(R_d, y);
}
float calculateErrorRate(float * r, float *y)
{
int count = 0;
for(int i = 0; i < NUM_ELEMENTS; i++){
if(r[i]==(float)predictY(&y[i * NO_OF_CLASSES])){
count++;
}
}
return (float)count / NUM_ELEMENTS;
}
int predictY(float *y){
int maxindex = 0;
float max = y[0];
for(int j = 1; j<NO_OF_CLASSES; j++){
if(y[j]>max){
max = y[j];
maxindex = j;
}
}
return maxindex;
}
void allocateDeviceArray(float **deviceArray, unsigned int size)
{
cudaMalloc((void**)deviceArray, size*sizeof(float));
}
void copyDataHostToDevice(float *deviceArray, float *hostArray, unsigned int size)
{
cudaMemcpy(deviceArray, hostArray , size*sizeof(float), cudaMemcpyHostToDevice);
}
void copyDataDeviceToHost(float *deviceArray, float *hostArray, unsigned int size)
{
cudaMemcpy(hostArray, deviceArray , size*sizeof(float), cudaMemcpyDeviceToHost);
}
void freeMemory(float *deviceArray)
{
  cudaFree(deviceArray);
}
#endif // #ifndef _NEURALNET_KERNEL_H_
|
03ba59d44336caa1af3519552631d80fea70c46f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_cooperative_groups.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
#include "row_conversion.hpp"
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
#include <cuda/barrier>
#endif
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <tuple>
constexpr auto JCUDF_ROW_ALIGNMENT = 8;
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
constexpr auto NUM_TILES_PER_KERNEL_FROM_ROWS = 2;
constexpr auto NUM_TILES_PER_KERNEL_TO_ROWS = 2;
constexpr auto NUM_TILES_PER_KERNEL_LOADED = 2;
constexpr auto NUM_VALIDITY_TILES_PER_KERNEL = 8;
constexpr auto NUM_VALIDITY_TILES_PER_KERNEL_LOADED = 2;
constexpr auto MAX_BATCH_SIZE = std::numeric_limits<cudf::size_type>::max();
// needed to suppress warning about cuda::barrier
#pragma nv_diag_suppress static_var_with_dynamic_init
#endif
using namespace cudf;
using detail::make_device_uvector_async;
using rmm::device_uvector;
namespace cudf {
namespace jni {
namespace detail {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/************************************************************************
* This module converts data from row-major to column-major and from column-major
* to row-major. It is a transpose of the data of sorts, but there are a few
* complicating factors. They are spelled out below:
*
* Row Batches:
* The row data has to fit inside a
* cuDF column, which limits it to 2 gigs currently. The calling code attempts
* to keep the data size under 2 gigs, but due to padding this isn't always
* the case, so being able to break this up into multiple columns is necessary.
* Internally, this is referred to as the row batch, which is a group of rows
* that will fit into this 2 gig space requirement. There are typically 1 of
* these batches, but there can be 2.
*
* Async Memcpy:
* The CUDA blocks are using memcpy_async, which allows for the device to
* schedule memcpy operations and then wait on them to complete at a later
* time with a barrier. The recommendation is to double-buffer the work
* so that processing can occur while a copy operation is being completed.
* On Ampere or later hardware there is dedicated hardware to do this copy
* and on pre-Ampere it should generate the same code that a hand-rolled
* loop would generate, so performance should be the same or better than
* a hand-rolled kernel.
*
* Tile Info:
* Each CUDA block will work on NUM_TILES_PER_KERNEL_*_ROWS tile infos
* before exiting. It will have enough shared memory available to load
* NUM_TILES_PER_KERNEL_LOADED tiles at one time. The block will load
* as many tiles as it can fit into shared memory and then wait on the
* first tile to completely load before processing. Processing in this
* case means copying the data from shared memory back out to device
* memory via memcpy_async. This kernel is completely memory bound.
*
* Batch Data:
* This structure contains all the row batches and some book-keeping
* data necessary for the batches such as row numbers for the batches.
*
* Tiles:
* The tile info describes a tile of data to process. In a GPU with
* 48KB of shared memory each tile uses approximately 24KB of memory
* which equates to about 144 bytes in each direction. The tiles are
* kept as square as possible to attempt to coalesce memory operations.
* The taller a tile is the better coalescing of columns, but row
* coalescing suffers. The wider a tile is the better the row coalescing,
* but columns coalescing suffers. The code attempts to produce a square
* tile to balance the coalescing. It starts by figuring out the optimal
* byte length and then adding columns to the data until the tile is too
* large. Since rows are different width with different alignment
* requirements, this isn't typically exact. Once a width is found the
* tiles are generated vertically with that width and height and then
* the process repeats. This means all the tiles will be the same
* height, but will have different widths based on what columns they
* encompass. Tiles in a vertical row will all have the same dimensions.
*
* --------------------------------
* | 4 5.0f || True 8 3 1 |
* | 3 6.0f || False 3 1 1 |
* | 2 7.0f || True 7 4 1 |
* | 1 8.0f || False 2 5 1 |
* --------------------------------
* | 0 9.0f || True 6 7 1 |
* ...
************************************************************************/
/**
* @brief The CUDA blocks work on one or more tile_info structs of data.
* This structure defines the workspaces for the blocks.
*
*/
struct tile_info {
int start_col;
int start_row;
int end_col;
int end_row;
int batch_number;
__device__ inline size_type get_shared_row_size(size_type const *const col_offsets,
size_type const *const col_sizes) const {
return util::round_up_unsafe(col_offsets[end_col] + col_sizes[end_col] - col_offsets[start_col],
JCUDF_ROW_ALIGNMENT);
}
__device__ inline size_type num_cols() const { return end_col - start_col + 1; }
__device__ inline size_type num_rows() const { return end_row - start_row + 1; }
};
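// Note: get_shared_row_size() above is the padded byte width of one tile row -- the byte span from
// the start of start_col through the end of end_col, rounded up to JCUDF_ROW_ALIGNMENT (8 bytes).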
/**
* @brief Returning rows is done in a byte cudf column. This is limited in size by
* `size_type` and so output is broken into batches of rows that fit inside
* this limit.
*
*/
struct row_batch {
size_type num_bytes; // number of bytes in this batch
size_type row_count; // number of rows in the batch
device_uvector<size_type> row_offsets; // offsets column of output cudf column
};
/**
* @brief Holds information about the batches of data to be processed
*
*/
struct batch_data {
device_uvector<size_type> batch_row_offsets; // offset column of returned cudf column
device_uvector<size_type> d_batch_row_boundaries; // row numbers for the start of each batch
std::vector<size_type>
batch_row_boundaries; // row numbers for the start of each batch: 0, 1500, 2700
std::vector<row_batch> row_batches; // information about each batch such as byte count
};
struct row_offset_functor {
row_offset_functor(size_type fixed_width_only_row_size)
: _fixed_width_only_row_size(fixed_width_only_row_size){};
__device__ inline size_type operator()(int row_number, int tile_row_start) const {
return (row_number - tile_row_start) * _fixed_width_only_row_size;
}
size_type _fixed_width_only_row_size;
};
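// Example (fixed-width-only case): with a 24-byte padded row, row 10 in a batch whose first row
// is row 8 maps to byte offset (10 - 8) * 24 = 48 within that batch's output buffer.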
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Copies data from row-based JCUDF format to column-based cudf format.
*
* This optimized version of the conversion is faster for fixed-width tables
* that do not have more than 100 columns.
*
* @param num_rows number of rows in the incoming table
* @param num_columns number of columns in the incoming table
* @param row_size length in bytes of each row
* @param input_offset_in_row offset to each row of data
* @param num_bytes total number of bytes in the incoming data
* @param output_data array of pointers to the output data
* @param output_nm array of pointers to the output null masks
 * @param input_data pointer to the incoming row data
*/
__global__ void
copy_from_rows_fixed_width_optimized(const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *input_offset_in_row,
const size_type *num_bytes, int8_t **output_data,
bitmask_type **output_nm, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// For simplicity we will refer to this as a row_group
// In practice we have found writing more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type const rows_per_group = blockDim.x;
size_type const row_group_start = blockIdx.x;
size_type const row_group_stride = gridDim.x;
size_type const row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying from shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (auto row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Step 1: Copy the data into shared memory
// We know row_size is always aligned with and a multiple of int64_t;
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t const *long_input = reinterpret_cast<int64_t const *>(input_data);
auto const shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x);
auto const shared_output_stride = blockDim.x * blockDim.y;
auto const row_index_end = ::min(num_rows, ((row_group_index + 1) * rows_per_group));
auto const num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
auto const shared_length = row_size * num_rows_in_group;
size_type const shared_output_end = shared_length / sizeof(int64_t);
auto const start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_output_index; shared_index < shared_output_end;
shared_index += shared_output_stride) {
long_shared[shared_index] = long_input[start_input_index + shared_index];
}
// Wait for all of the data to be in shared memory
__syncthreads();
// Step 2 copy the data back out
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
auto const row_index = (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data in for the next row group.
uint32_t active_mask = __ballot_sync(0xffffffff, row_index < num_rows);
if (row_index < num_rows) {
auto const col_index_start = threadIdx.y;
auto const col_index_stride = blockDim.y;
for (auto col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
auto const col_size = num_bytes[col_index];
int8_t const *col_tmp = &(row_tmp[input_offset_in_row[col_index]]);
int8_t *col_output = output_data[col_index];
switch (col_size) {
case 1: {
col_output[row_index] = *col_tmp;
break;
}
case 2: {
int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output);
short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp);
break;
}
case 4: {
int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output);
int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp);
break;
}
case 8: {
int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output);
long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp);
break;
}
default: {
auto const output_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (auto b = 0; b < col_size; b++) {
col_output[b + output_offset] = col_tmp[b];
}
break;
}
}
bitmask_type *nm = output_nm[col_index];
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
int predicate = *valid_byte & (1 << byte_bit_offset);
uint32_t bitmask = __ballot_sync(active_mask, predicate);
if (row_index % 32 == 0) {
nm[word_index(row_index)] = bitmask;
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied before starting on the next row group
__syncthreads();
}
}
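// The two fixed-width kernels in this file (copy_from_rows_fixed_width_optimized above and
// copy_to_rows_fixed_width_optimized below) are expected to be launched with the block/grid
// dimensions produced by calc_fixed_width_kernel_dims() further down: one staged row per thread
// in x, up to 32 column-groups in y, and row_size * blockDim.x bytes of dynamic shared memory.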
__global__ void copy_to_rows_fixed_width_optimized(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *output_offset_in_row, const size_type *num_bytes,
const int8_t **input_data, const bitmask_type **input_nm, int8_t *output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// We do not support copying a subset of the columns in a row yet, so we don't
// currently support a row that is wider than shared memory.
// For simplicity we will refer to this as a row_group
// In practice we have found reading more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type rows_per_group = blockDim.x;
size_type row_group_start = blockIdx.x;
size_type row_group_stride = gridDim.x;
size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying to shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp =
&row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (size_type row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data back out.
if (row_index < (start_row + num_rows)) {
size_type col_index_start = threadIdx.y;
size_type col_index_stride = blockDim.y;
for (size_type col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
size_type col_size = num_bytes[col_index];
int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]);
const int8_t *col_input = input_data[col_index];
switch (col_size) {
case 1: {
*col_tmp = col_input[row_index];
break;
}
case 2: {
const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input);
*reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index];
break;
}
case 4: {
const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input);
*reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index];
break;
}
case 8: {
const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input);
*reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index];
break;
}
default: {
size_type input_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (size_type b = 0; b < col_size; b++) {
col_tmp[b] = col_input[b + input_offset];
}
break;
}
}
// atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned
// so we have to rewrite the addresses to make sure that it is 4 byte aligned
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4;
int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes);
size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8);
// Now copy validity for the column
if (input_nm[col_index]) {
if (bit_is_set(input_nm[col_index], row_index)) {
atomicOr_block(valid_int, 1 << int_bit_offset);
} else {
atomicAnd_block(valid_int, ~(1 << int_bit_offset));
}
} else {
// It is valid so just set the bit
atomicOr_block(valid_int, 1 << int_bit_offset);
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied into shared memory
__syncthreads();
// Step 2: Copy the data back out
// We know row_size is always aligned with and a multiple of int64_t;
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t *long_output = reinterpret_cast<int64_t *>(output_data);
size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x);
size_type shared_input_stride = blockDim.x * blockDim.y;
size_type row_index_end = ((row_group_index + 1) * rows_per_group);
if (row_index_end > num_rows) {
row_index_end = num_rows;
}
size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
size_type shared_length = row_size * num_rows_in_group;
size_type shared_input_end = shared_length / sizeof(int64_t);
size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_input_index; shared_index < shared_input_end;
shared_index += shared_input_stride) {
long_output[start_output_index + shared_index] = long_shared[shared_index];
}
__syncthreads();
// Go for the next round
}
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief copy data from cudf columns into JCUDF format, which is row-based
*
* @tparam RowOffsetIter iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile shared memory amount each `tile_info` is using
 * @param tile_infos span of `tile_info` structs that define the work
* @param input_data pointer to raw table data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data
*
*/
template <typename RowOffsetIter>
__global__ void copy_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile,
device_span<const tile_info> tile_infos, const int8_t **input_data,
const size_type *col_sizes, const size_type *col_offsets,
RowOffsetIter row_offsets, size_type const *batch_row_boundaries,
int8_t **output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// This has been broken up for us in the tile_info struct, so we don't have
// any calculation to do here, but it is important to note.
constexpr unsigned stages_count = NUM_TILES_PER_KERNEL_LOADED;
auto group = cooperative_groups::this_thread_block();
extern __shared__ int8_t shared_data[];
int8_t *shared[stages_count] = {shared_data, shared_data + shmem_used_per_tile};
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier[NUM_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_TILES_PER_KERNEL_LOADED; ++i) {
init(&tile_barrier[i], group.size());
}
}
group.sync();
auto const tiles_remaining =
::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS,
static_cast<uint>(NUM_TILES_PER_KERNEL_TO_ROWS));
size_t fetch_index; //< tile we are currently fetching
size_t processing_index; //< tile we are currently processing
for (processing_index = fetch_index = 0; processing_index < tiles_remaining; ++processing_index) {
// Fetch ahead up to NUM_TILES_PER_KERNEL_LOADED
for (; fetch_index < tiles_remaining && fetch_index < (processing_index + stages_count);
++fetch_index) {
auto const fetch_tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS + fetch_index];
auto const num_fetch_cols = fetch_tile.num_cols();
auto const num_fetch_rows = fetch_tile.num_rows();
auto const num_elements_in_tile = num_fetch_cols * num_fetch_rows;
auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
auto const starting_column_offset = col_offsets[fetch_tile.start_col];
auto &fetch_barrier = tile_barrier[fetch_index % NUM_TILES_PER_KERNEL_LOADED];
// wait for the last use of the memory to be completed
if (fetch_index >= NUM_TILES_PER_KERNEL_LOADED) {
fetch_barrier.arrive_and_wait();
}
// to do the copy we need to do n column copies followed by m element copies OR
// we have to do m element copies followed by r row copies. When going from column
// to row it is much easier to copy by elements first otherwise we would need a running
// total of the column sizes for our tile, which isn't readily available. This makes it
// more appealing to copy element-wise from input data into shared matching the end layout
// and do row-based memcopies out.
auto const shared_buffer_base = shared[fetch_index % stages_count];
for (auto el = static_cast<int>(threadIdx.x); el < num_elements_in_tile; el += blockDim.x) {
auto const relative_col = el / num_fetch_rows;
auto const relative_row = el % num_fetch_rows;
auto const absolute_col = relative_col + fetch_tile.start_col;
auto const absolute_row = relative_row + fetch_tile.start_row;
auto const col_size = col_sizes[absolute_col];
auto const col_offset = col_offsets[absolute_col];
auto const relative_col_offset = col_offset - starting_column_offset;
auto const shared_offset = relative_row * fetch_tile_row_size + relative_col_offset;
auto const input_src = input_data[absolute_col] + col_size * absolute_row;
// copy the element from global memory
switch (col_size) {
case 2:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<2>(col_size), fetch_barrier);
break;
case 4:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<4>(col_size), fetch_barrier);
break;
case 8:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<8>(col_size), fetch_barrier);
break;
default:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src, col_size,
fetch_barrier);
break;
}
}
}
auto &processing_barrier = tile_barrier[processing_index % NUM_TILES_PER_KERNEL_LOADED];
processing_barrier.arrive_and_wait();
auto const tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS + processing_index];
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
auto const column_offset = col_offsets[tile.start_col];
auto const tile_output_buffer = output_data[tile.batch_number];
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
// copy entire row 8 bytes at a time
constexpr auto bytes_per_chunk = 8;
auto const chunks_per_row = util::div_rounding_up_unsafe(tile_row_size, bytes_per_chunk);
auto const total_chunks = chunks_per_row * tile.num_rows();
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_row = i / chunks_per_row;
auto const relative_chunk_offset = (i % chunks_per_row) * bytes_per_chunk;
auto const output_dest = tile_output_buffer +
row_offsets(relative_row + tile.start_row, row_batch_start) +
column_offset + relative_chunk_offset;
auto const input_src = &shared[processing_index % stages_count]
[tile_row_size * relative_row + relative_chunk_offset];
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
// wait on the last copies to complete
for (uint i = 0; i < ::min(stages_count, tiles_remaining); ++i) {
tile_barrier[i].arrive_and_wait();
}
}
/**
 * @brief copy validity data from cudf columns into JCUDF row-based format
*
* @tparam RowOffsetIter iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data, partitioned by data size
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
 * @param input_nm pointers to the input null masks, one per column
*
*/
template <typename RowOffsetIter>
__global__ void
copy_validity_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const bitmask_type **input_nm) {
extern __shared__ int8_t shared_data[];
int8_t *shared_tiles[NUM_VALIDITY_TILES_PER_KERNEL_LOADED] = {
shared_data, shared_data + shmem_used_per_tile / 2};
using cudf::detail::warp_size;
// each thread of warp reads a single int32 of validity - so we read 128 bytes
// then ballot_sync the bits and write the result to shmem
// after we fill shared mem memcpy it out in a blob.
// probably need knobs for number of rows vs columns to balance read/write
auto group = cooperative_groups::this_thread_block();
int const tiles_remaining =
::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL,
static_cast<uint>(NUM_VALIDITY_TILES_PER_KERNEL));
__shared__ cuda::barrier<cuda::thread_scope_block>
shared_tile_barriers[NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++i) {
init(&shared_tile_barriers[i], group.size());
}
}
group.sync();
for (int validity_tile = 0; validity_tile < tiles_remaining; ++validity_tile) {
if (validity_tile >= NUM_VALIDITY_TILES_PER_KERNEL_LOADED) {
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED].arrive_and_wait();
}
int8_t *this_shared_tile = shared_tiles[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto tile = tile_infos[blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL + validity_tile];
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
auto const num_sections_x = util::div_rounding_up_unsafe(num_tile_cols, 32);
auto const num_sections_y = util::div_rounding_up_unsafe(num_tile_rows, 32);
auto const validity_data_row_length = util::round_up_unsafe(
util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
auto const total_sections = num_sections_x * num_sections_y;
int const warp_id = threadIdx.x / warp_size;
int const lane_id = threadIdx.x % warp_size;
auto const warps_per_tile = ::max(1u, blockDim.x / warp_size);
// the tile is divided into sections. A warp operates on a section at a time.
for (int my_section_idx = warp_id; my_section_idx < total_sections;
my_section_idx += warps_per_tile) {
// convert to rows and cols
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * 32 + lane_id;
auto const relative_row = section_y * 32;
auto const absolute_col = relative_col + tile.start_col;
auto const absolute_row = relative_row + tile.start_row;
auto const participation_mask = __ballot_sync(0xFFFFFFFF, absolute_col < num_columns);
if (absolute_col < num_columns) {
auto my_data = input_nm[absolute_col] != nullptr ?
input_nm[absolute_col][absolute_row / 32] :
std::numeric_limits<uint32_t>::max();
// every thread that is participating in the warp has 4 bytes, but it's column-based
// data and we need it in row-based. So we shuffle the bits around with ballot_sync to
// make the bytes we actually write.
bitmask_type dw_mask = 1;
for (int i = 0; i < 32 && relative_row + i < num_rows; ++i, dw_mask <<= 1) {
auto validity_data = __ballot_sync(participation_mask, my_data & dw_mask);
// lead thread in each warp writes data
auto const validity_write_offset =
validity_data_row_length * (relative_row + i) + relative_col / CHAR_BIT;
if (threadIdx.x % warp_size == 0) {
*reinterpret_cast<int32_t *>(&this_shared_tile[validity_write_offset]) = validity_data;
}
}
}
}
// make sure entire tile has finished copy
group.sync();
auto const output_data_base =
output_data[tile.batch_number] + validity_offset + tile.start_col / CHAR_BIT;
// now async memcpy the shared memory out to the final destination 4 bytes at a time since we do
// 32-row chunks
constexpr auto bytes_per_chunk = 8;
auto const row_bytes = util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT);
auto const chunks_per_row = util::div_rounding_up_unsafe(row_bytes, bytes_per_chunk);
auto const total_chunks = chunks_per_row * tile.num_rows();
auto &processing_barrier =
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto const tail_bytes = row_bytes % bytes_per_chunk;
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_row = i / chunks_per_row;
auto const col_chunk = i % chunks_per_row;
auto const relative_chunk_offset = col_chunk * bytes_per_chunk;
auto const output_dest = output_data_base +
row_offsets(relative_row + tile.start_row, row_batch_start) +
relative_chunk_offset;
auto const input_src =
&this_shared_tile[validity_data_row_length * relative_row + relative_chunk_offset];
if (tail_bytes > 0 && col_chunk == chunks_per_row - 1)
cuda::memcpy_async(output_dest, input_src, tail_bytes, processing_barrier);
else
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
// wait for last tiles of data to arrive
for (int validity_tile = 0;
validity_tile < tiles_remaining % NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++validity_tile) {
shared_tile_barriers[validity_tile].arrive_and_wait();
}
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetIter iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointers to column data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetIter>
__global__ void copy_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type *col_sizes, const size_type *col_offsets,
device_span<const tile_info> tile_infos, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// This has been broken up for us in the tile_info struct, so we don't have
// any calculation to do here, but it is important to note.
// to speed up some of the random access memory we do, we copy col_sizes and col_offsets
// to shared memory for each of the tiles that we work on
constexpr unsigned stages_count = NUM_TILES_PER_KERNEL_LOADED;
auto group = cooperative_groups::this_thread_block();
extern __shared__ int8_t shared_data[];
int8_t *shared[stages_count] = {shared_data, shared_data + shmem_used_per_tile};
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier[NUM_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_TILES_PER_KERNEL_LOADED; ++i) {
init(&tile_barrier[i], group.size());
}
}
group.sync();
auto tiles_remaining =
::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS,
static_cast<uint>(NUM_TILES_PER_KERNEL_FROM_ROWS));
size_t fetch_index;
size_t processing_index;
for (processing_index = fetch_index = 0; processing_index < tiles_remaining; ++processing_index) {
// Fetch ahead up to stages_count groups
for (; fetch_index < static_cast<size_t>(tiles_remaining) &&
fetch_index < (processing_index + stages_count);
++fetch_index) {
auto const fetch_tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS + fetch_index];
auto const fetch_tile_start_row = fetch_tile.start_row;
auto const starting_col_offset = col_offsets[fetch_tile.start_col];
auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
auto &fetch_barrier = tile_barrier[fetch_index % NUM_TILES_PER_KERNEL_LOADED];
auto const row_batch_start =
fetch_tile.batch_number == 0 ? 0 : batch_row_boundaries[fetch_tile.batch_number];
// if we have fetched all buffers, we need to wait for processing
// to complete on them before we can use them again
if (fetch_index > NUM_TILES_PER_KERNEL_LOADED) {
fetch_barrier.arrive_and_wait();
}
for (auto row = fetch_tile_start_row + static_cast<int>(threadIdx.x);
row <= fetch_tile.end_row; row += blockDim.x) {
auto shared_offset = (row - fetch_tile_start_row) * fetch_tile_row_size;
// copy the data
cuda::memcpy_async(&shared[fetch_index % stages_count][shared_offset],
&input_data[row_offsets(row, row_batch_start) + starting_col_offset],
fetch_tile_row_size, fetch_barrier);
}
}
auto &processing_barrier = tile_barrier[processing_index % NUM_TILES_PER_KERNEL_LOADED];
// ensure our data is ready
processing_barrier.arrive_and_wait();
auto const tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS + processing_index];
auto const rows_in_tile = tile.num_rows();
auto const cols_in_tile = tile.num_cols();
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
// now we copy from shared memory to final destination.
// the data is laid out in rows in shared memory, so the reads
// for a column will be "vertical". Because of this and the different
// sizes for each column, this portion is handled on row/column basis.
// to prevent each thread working on a single row and also to ensure
// that all threads can do work in the case of more threads than rows,
// we do a global index instead of a double for loop with col/row.
for (int index = threadIdx.x; index < rows_in_tile * cols_in_tile; index += blockDim.x) {
auto const relative_col = index % cols_in_tile;
auto const relative_row = index / cols_in_tile;
auto const absolute_col = relative_col + tile.start_col;
auto const absolute_row = relative_row + tile.start_row;
auto const shared_memory_row_offset = tile_row_size * relative_row;
auto const shared_memory_offset =
col_offsets[absolute_col] - col_offsets[tile.start_col] + shared_memory_row_offset;
auto const column_size = col_sizes[absolute_col];
int8_t *shmem_src = &shared[processing_index % stages_count][shared_memory_offset];
int8_t *dst = &output_data[absolute_col][absolute_row * column_size];
cuda::memcpy_async(dst, shmem_src, column_size, processing_barrier);
}
group.sync();
}
// wait on the last copies to complete
for (uint i = 0; i < ::min(stages_count, tiles_remaining); ++i) {
tile_barrier[i].arrive_and_wait();
}
}
/**
 * @brief copy validity data from JCUDF row-based format into cudf column null masks
*
* @tparam RowOffsetIter iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_nm pointers to null masks for columns
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetIter>
__global__ void
copy_validity_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, bitmask_type **output_nm,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const int8_t *input_data) {
extern __shared__ int8_t shared_data[];
int8_t *shared_tiles[NUM_VALIDITY_TILES_PER_KERNEL_LOADED] = {
shared_data, shared_data + shmem_used_per_tile / 2};
using cudf::detail::warp_size;
// each thread of warp reads a single byte of validity - so we read 32 bytes
// then ballot_sync the bits and write the result to shmem
// after we fill shared mem memcpy it out in a blob.
// probably need knobs for number of rows vs columns to balance read/write
auto group = cooperative_groups::this_thread_block();
int const tiles_remaining =
::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL,
static_cast<uint>(NUM_VALIDITY_TILES_PER_KERNEL));
__shared__ cuda::barrier<cuda::thread_scope_block>
shared_tile_barriers[NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++i) {
init(&shared_tile_barriers[i], group.size());
}
}
group.sync();
for (int validity_tile = 0; validity_tile < tiles_remaining; ++validity_tile) {
if (validity_tile >= NUM_VALIDITY_TILES_PER_KERNEL_LOADED) {
auto const validity_index = validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED;
shared_tile_barriers[validity_index].arrive_and_wait();
}
int8_t *this_shared_tile = shared_tiles[validity_tile % 2];
auto const tile = tile_infos[blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL + validity_tile];
auto const tile_start_col = tile.start_col;
auto const tile_start_row = tile.start_row;
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
constexpr auto rows_per_read = 32;
auto const num_sections_x = util::div_rounding_up_safe(num_tile_cols, CHAR_BIT);
auto const num_sections_y = util::div_rounding_up_safe(num_tile_rows, rows_per_read);
auto const validity_data_col_length = num_sections_y * 4; // words to bytes
auto const total_sections = num_sections_x * num_sections_y;
int const warp_id = threadIdx.x / warp_size;
int const lane_id = threadIdx.x % warp_size;
auto const warps_per_tile = ::max(1u, blockDim.x / warp_size);
// the tile is divided into sections. A warp operates on a section at a time.
for (int my_section_idx = warp_id; my_section_idx < total_sections;
my_section_idx += warps_per_tile) {
// convert section to row and col
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * CHAR_BIT;
auto const relative_row = section_y * rows_per_read + lane_id;
auto const absolute_col = relative_col + tile_start_col;
auto const absolute_row = relative_row + tile_start_row;
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
auto const participation_mask = __ballot_sync(0xFFFFFFFF, absolute_row < num_rows);
if (absolute_row < num_rows) {
auto const my_byte = input_data[row_offsets(absolute_row, row_batch_start) +
validity_offset + absolute_col / CHAR_BIT];
// so every thread that is participating in the warp has a byte, but it's row-based
// data and we need it in column-based. So we shuffle the bits around to make
// the bytes we actually write.
for (int i = 0, byte_mask = 1; i < CHAR_BIT && relative_col + i < num_columns;
++i, byte_mask <<= 1) {
auto validity_data = __ballot_sync(participation_mask, my_byte & byte_mask);
// lead thread in each warp writes data
if (threadIdx.x % warp_size == 0) {
auto const validity_write_offset =
validity_data_col_length * (relative_col + i) + relative_row / CHAR_BIT;
*reinterpret_cast<int32_t *>(&this_shared_tile[validity_write_offset]) = validity_data;
}
}
}
}
// make sure entire tile has finished copy
group.sync();
// now async memcpy the shared memory out to the final destination 8 bytes at a time
constexpr auto bytes_per_chunk = 8;
auto const col_bytes = util::div_rounding_up_unsafe(num_tile_rows, CHAR_BIT);
auto const chunks_per_col = util::div_rounding_up_unsafe(col_bytes, bytes_per_chunk);
auto const total_chunks = chunks_per_col * num_tile_cols;
auto &processing_barrier =
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto const tail_bytes = col_bytes % bytes_per_chunk;
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_col = i / chunks_per_col;
auto const row_chunk = i % chunks_per_col;
auto const absolute_col = relative_col + tile_start_col;
auto const relative_chunk_byte_offset = row_chunk * bytes_per_chunk;
auto const output_dest = output_nm[absolute_col] + word_index(tile_start_row) + row_chunk * 2;
auto const input_src =
&this_shared_tile[validity_data_col_length * relative_col + relative_chunk_byte_offset];
if (tail_bytes > 0 && row_chunk == chunks_per_col - 1) {
cuda::memcpy_async(output_dest, input_src, tail_bytes, processing_barrier);
} else {
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
}
// wait for last tiles of data to arrive
auto const num_tiles_to_wait = tiles_remaining > NUM_VALIDITY_TILES_PER_KERNEL_LOADED ?
NUM_VALIDITY_TILES_PER_KERNEL_LOADED :
tiles_remaining;
for (int validity_tile = 0; validity_tile < num_tiles_to_wait; ++validity_tile) {
shared_tile_barriers[validity_tile].arrive_and_wait();
}
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Calculate the dimensions of the kernel for fixed width only columns.
*
* @param [in] num_columns the number of columns being copied.
* @param [in] num_rows the number of rows being copied.
* @param [in] size_per_row the size each row takes up when padded.
* @param [out] blocks the size of the blocks for the kernel
* @param [out] threads the size of the threads for the kernel
* @return the size in bytes of shared memory needed for each block.
*/
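// Illustrative example (not part of the original code): for a hypothetical table of 20 INT32
// columns, size_per_row is 88 bytes (80 bytes of data plus 3 validity bytes, rounded up to the
// 8-byte row alignment), so y_block_size = min(ceil(20 / 4), 32) = 5,
// x_possible_block_size = 1024 / 5 = 204, max_block_size = min(204, 48 * 1024 / 88) = 204,
// block_size = (204 / 32) * 32 = 192, giving 192 x 5 threads per block and
// 88 * 192 = 16896 bytes of shared memory.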
static int calc_fixed_width_kernel_dims(const size_type num_columns, const size_type num_rows,
const size_type size_per_row, dim3 &blocks, dim3 &threads) {
// We have found speed degrades when a thread handles more than 4 columns.
// Each block is 2 dimensional. The y dimension indicates the columns.
// We limit this to 32 threads in the y dimension so we can still
// have at least 32 threads in the x dimension (1 warp) which should
// result in better coalescing of memory operations. We also
// want to guarantee that we are processing a multiple of 32 threads
// in the x dimension because we use atomic operations at the block
// level when writing validity data out to main memory, and that would
// need to change if we split a word of validity data between blocks.
int const y_block_size = min(util::div_rounding_up_safe(num_columns, 4), 32);
int const x_possible_block_size = 1024 / y_block_size;
// 48KB is the default setting for shared memory per block according to the cuda tutorials
// If someone configures the GPU to only have 16 KB this might not work.
int const max_shared_size = 48 * 1024;
  // If we don't have enough shared memory there is no point in having more threads
  // per block, since they would just sit idle
auto const max_block_size = ::min(x_possible_block_size, max_shared_size / size_per_row);
  // Make sure that the x dimension is a multiple of 32. This not only helps
  // coalesce memory access, it also lets us do a ballot sync for validity so we can write
  // the data back out at the warp level. If x is a multiple of 32 then each thread in the y
  // dimension is associated with one or more warps, which should correspond to the validity
  // words directly.
int const block_size = (max_block_size / 32) * 32;
CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory");
// The maximum number of blocks supported in the x dimension is 2 ^ 31 - 1
  // but in practice having too many can cause some overhead that I don't totally
  // understand. Playing around with this, having as little as 600 blocks appears
  // to be able to saturate memory on a V100, so this is an order of magnitude higher
// to try and future proof this a bit.
int const num_blocks = std::clamp((num_rows + block_size - 1) / block_size, 1, 10240);
blocks.x = num_blocks;
blocks.y = 1;
blocks.z = 1;
threads.x = block_size;
threads.y = y_block_size;
threads.z = 1;
return size_per_row * block_size;
}
/**
 * When converting to rows it is possible that the size of the table is too big to fit
 * in a single column. This creates an output column for a subset of the rows in the table,
 * starting at start_row and containing the next num_rows rows. Most of the parameters passed
 * into this function are common between runs and should be calculated once.
*/
static std::unique_ptr<column> fixed_width_convert_to_rows(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type size_per_row, rmm::device_uvector<size_type> &column_start,
rmm::device_uvector<size_type> &column_size, rmm::device_uvector<const int8_t *> &input_data,
rmm::device_uvector<const bitmask_type *> &input_nm, const scalar &zero,
const scalar &scalar_size_per_row, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
int64_t const total_allocation = size_per_row * num_rows;
// We made a mistake in the split somehow
CUDF_EXPECTS(total_allocation < std::numeric_limits<size_type>::max(),
"Table is too large to fit!");
// Allocate and set the offsets row for the byte array
std::unique_ptr<column> offsets =
cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream);
std::unique_ptr<column> data =
make_numeric_column(data_type(type_id::INT8), static_cast<size_type>(total_allocation),
mask_state::UNALLOCATED, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
hipLaunchKernelGGL(( copy_to_rows_fixed_width_optimized), dim3(blocks), dim3(threads), shared_size, stream.value(),
start_row, num_rows, num_columns, size_per_row, column_start.data(), column_size.data(),
input_data.data(), input_nm.data(), data->mutable_view().data<int8_t>());
return make_lists_column(num_rows, std::move(offsets), std::move(data), 0,
rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr);
}
static inline bool are_all_fixed_width(std::vector<data_type> const &schema) {
return std::all_of(schema.begin(), schema.end(),
[](const data_type &t) { return is_fixed_width(t); });
}
/**
* @brief Given a set of fixed width columns, calculate how the data will be laid out in memory.
*
* @param [in] schema the types of columns that need to be laid out.
* @param [out] column_start the byte offset where each column starts in the row.
* @param [out] column_size the size in bytes of the data for each columns in the row.
* @return the size in bytes each row needs.
*/
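// Illustrative example (not part of the original code): for a hypothetical schema of
// {INT32, INT64, INT16}, column_start becomes {0, 8, 16} and column_size {4, 8, 2}; the data
// ends at byte 18, one validity byte brings it to 19, and rounding up to the 8-byte row
// alignment yields a 24-byte row.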
static inline int32_t compute_fixed_width_layout(std::vector<data_type> const &schema,
std::vector<size_type> &column_start,
std::vector<size_type> &column_size) {
// We guarantee that the start of each column is 64-bit aligned so anything can go
// there, but to make the code simple we will still do an alignment for it.
int32_t at_offset = 0;
for (auto col = schema.begin(); col < schema.end(); col++) {
size_type s = size_of(*col);
column_size.emplace_back(s);
std::size_t allocation_needed = s;
std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types
at_offset = util::round_up_unsafe(at_offset, static_cast<int32_t>(alignment_needed));
column_start.emplace_back(at_offset);
at_offset += allocation_needed;
}
// Now we need to add in space for validity
// Eventually we can think about nullable vs not nullable, but for now we will just always add
// it in
int32_t const validity_bytes_needed =
util::div_rounding_up_safe<int32_t>(schema.size(), CHAR_BIT);
// validity comes at the end and is byte aligned so we can pack more in.
at_offset += validity_bytes_needed;
// Now we need to pad the end so all rows are 64 bit aligned
return util::round_up_unsafe(at_offset, JCUDF_ROW_ALIGNMENT);
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Compute information about a table such as bytes per row and offsets.
*
* @tparam iterator iterator of column schema data
* @param begin starting iterator of column schema
* @param end ending iterator of column schema
* @param column_starts column start offsets
* @param column_sizes size in bytes of each column
* @return size of the fixed_width data portion of a row.
*/
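// Illustrative example (not part of the original code): for a hypothetical schema of
// {INT32, STRING}, the string column is compound and so contributes an 8-byte offset/length
// entry aligned to 8 bytes. column_starts becomes {0, 8, 16} (the last entry is the validity
// offset), column_sizes becomes {4, 8}, and the returned row size is
// round_up(16 + 1 validity byte, 8) = 24 bytes.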
template <typename iterator>
static size_type compute_column_information(iterator begin, iterator end,
std::vector<size_type> &column_starts,
std::vector<size_type> &column_sizes) {
size_type fixed_width_size_per_row = 0;
for (auto cv = begin; cv != end; ++cv) {
auto col_type = std::get<0>(*cv);
bool nested_type = is_compound(col_type);
// a list or string column will write a single uint64
// of data here for offset/length
auto col_size = nested_type ? 8 : size_of(col_type);
// align size for this type
size_type const alignment_needed = col_size; // They are the same for fixed width types
fixed_width_size_per_row = util::round_up_unsafe(fixed_width_size_per_row, alignment_needed);
column_starts.push_back(fixed_width_size_per_row);
column_sizes.push_back(col_size);
fixed_width_size_per_row += col_size;
}
auto validity_offset = fixed_width_size_per_row;
column_starts.push_back(validity_offset);
return util::round_up_unsafe(
fixed_width_size_per_row +
util::div_rounding_up_safe(static_cast<size_type>(std::distance(begin, end)), CHAR_BIT),
JCUDF_ROW_ALIGNMENT);
}
/**
* @brief Build `tile_info` for the validity data to break up the work.
*
* @param num_columns number of columns in the table
* @param num_rows number of rows in the table
* @param shmem_limit_per_tile size of shared memory available to a single gpu tile
* @param row_batches batched row information for multiple output locations
* @return vector of `tile_info` structs for validity data
*/
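// Illustrative example (not part of the original code): with a hypothetical 24KB shared memory
// limit per tile and a table of 300 columns, desired_rows_and_columns = sqrt(24576) ~= 156, so
// column_stride = round_down(156, 8) = 152, bytes_per_row = round_up(ceil(152 / 8), 8) = 24 and
// row_stride = min(num_rows, round_down(24576 / 24, 64)) = 1024, i.e. each validity tile covers
// up to 152 columns by 1024 rows.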
std::vector<detail::tile_info>
build_validity_tile_infos(size_type const &num_columns, size_type const &num_rows,
size_type const &shmem_limit_per_tile,
std::vector<row_batch> const &row_batches) {
auto const desired_rows_and_columns = static_cast<int>(sqrt(shmem_limit_per_tile));
auto const column_stride = util::round_up_unsafe(
[&]() {
if (desired_rows_and_columns > num_columns) {
// not many columns, group it into 8s and ship it off
return ::min(CHAR_BIT, num_columns);
} else {
return util::round_down_safe(desired_rows_and_columns, CHAR_BIT);
}
}(),
JCUDF_ROW_ALIGNMENT);
// we fit as much as we can given the column stride
// note that an element in the table takes just 1 bit, but a row with a single
// element still takes 8 bytes!
auto const bytes_per_row = util::round_up_safe(
util::div_rounding_up_unsafe(column_stride, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
auto const row_stride =
::min(num_rows, util::round_down_safe(shmem_limit_per_tile / bytes_per_row, 64));
std::vector<detail::tile_info> validity_tile_infos;
validity_tile_infos.reserve(num_columns / column_stride * num_rows / row_stride);
for (int col = 0; col < num_columns; col += column_stride) {
int current_tile_row_batch = 0;
int rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
int row = 0;
while (row < num_rows) {
if (rows_left_in_batch == 0) {
current_tile_row_batch++;
rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
}
int const tile_height = ::min(row_stride, rows_left_in_batch);
validity_tile_infos.emplace_back(detail::tile_info{
col, row, ::min(col + column_stride - 1, num_columns - 1), row + tile_height - 1});
row += tile_height;
rows_left_in_batch -= tile_height;
}
}
return validity_tile_infos;
}
/**
 * @brief functor that returns the size of a row, or 0 if the row is greater than the number of
 * rows in the table
*
* @tparam RowSize iterator that returns the size of a specific row
*/
template <typename RowSize> struct row_size_functor {
row_size_functor(size_type row_end, RowSize row_sizes, size_type last_row_end)
: _row_end(row_end), _row_sizes(row_sizes), _last_row_end(last_row_end) {}
__device__ inline uint64_t operator()(int i) const {
return i >= _row_end ? 0 : _row_sizes[i + _last_row_end];
}
size_type _row_end;
RowSize _row_sizes;
size_type _last_row_end;
};
/**
* @brief Builds batches of rows that will fit in the size limit of a column.
*
* @tparam RowSize iterator that gives the size of a specific row of the table.
* @param num_rows Total number of rows in the table
* @param row_sizes iterator that gives the size of a specific row of the table.
* @param all_fixed_width bool indicating all data in this table is fixed width
* @param stream stream to operate on for this work
* @param mr memory resource used to allocate any returned data
* @returns vector of size_type's that indicate row numbers for batch boundaries and a
* device_uvector of row offsets
*/
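// Illustrative example (not part of the original code): with a hypothetical table of 50 million
// rows that each encode to 64 bytes, the total size is 3.2GB, so two batches are produced. The
// first batch ends at the row where the cumulative size first reaches MAX_BATCH_SIZE (roughly
// 33.5 million rows) and the second batch holds the remaining rows.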
template <typename RowSize>
batch_data build_batches(size_type num_rows, RowSize row_sizes, bool all_fixed_width,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
auto const total_size = thrust::reduce(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows);
auto const num_batches = static_cast<int32_t>(
util::div_rounding_up_safe(total_size, static_cast<uint64_t>(MAX_BATCH_SIZE)));
auto const num_offsets = num_batches + 1;
std::vector<row_batch> row_batches;
std::vector<size_type> batch_row_boundaries;
device_uvector<size_type> batch_row_offsets(all_fixed_width ? 0 : num_rows, stream);
// at most max gpu memory / 2GB iterations.
batch_row_boundaries.reserve(num_offsets);
batch_row_boundaries.push_back(0);
size_type last_row_end = 0;
device_uvector<uint64_t> cumulative_row_sizes(num_rows, stream);
thrust::inclusive_scan(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows,
cumulative_row_sizes.begin());
while (static_cast<int>(batch_row_boundaries.size()) < num_offsets) {
// find the next MAX_BATCH_SIZE boundary
size_type const row_end =
((thrust::lower_bound(rmm::exec_policy(stream), cumulative_row_sizes.begin(),
cumulative_row_sizes.begin() + (num_rows - last_row_end),
MAX_BATCH_SIZE) -
cumulative_row_sizes.begin()) +
last_row_end);
// build offset list for each row in this batch
auto const num_rows_in_batch = row_end - last_row_end;
auto const num_entries = row_end - last_row_end + 1;
device_uvector<size_type> output_batch_row_offsets(num_entries, stream, mr);
auto row_size_iter_bounded = cudf::detail::make_counting_transform_iterator(
0, row_size_functor(row_end, row_sizes, last_row_end));
thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter_bounded,
row_size_iter_bounded + num_entries, output_batch_row_offsets.begin());
auto const batch_bytes = output_batch_row_offsets.element(num_rows_in_batch, stream);
// The output_batch_row_offsets vector is used as the offset column of the returned data. This
// needs to be individually allocated, but the kernel needs a contiguous array of offsets or
// more global lookups are necessary.
if (!all_fixed_width) {
hipMemcpy(batch_row_offsets.data() + last_row_end, output_batch_row_offsets.data(),
num_rows_in_batch * sizeof(size_type), hipMemcpyDeviceToDevice);
}
batch_row_boundaries.push_back(row_end);
row_batches.push_back({batch_bytes, num_rows_in_batch, std::move(output_batch_row_offsets)});
last_row_end = row_end;
}
return {std::move(batch_row_offsets), make_device_uvector_async(batch_row_boundaries, stream),
std::move(batch_row_boundaries), std::move(row_batches)};
}
/**
* @brief Computes the number of tiles necessary given a tile height and batch offsets
*
* @param batch_row_boundaries row boundaries for each batch
* @param desired_tile_height height of each tile in the table
* @param stream stream to use
* @return number of tiles necessary
*/
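// Illustrative example (not part of the original code): with hypothetical batch row boundaries
// {0, 1000, 1500} and a desired tile height of 128, this returns
// ceil(1000 / 128) + ceil(500 / 128) = 8 + 4 = 12 tiles.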
int compute_tile_counts(device_span<size_type const> const &batch_row_boundaries,
int desired_tile_height, rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
return thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
}
/**
* @brief Builds the `tile_info` structs for a given table.
*
* @param tiles span of tiles to populate
* @param batch_row_boundaries boundary to row batches
* @param column_start starting column of the tile
* @param column_end ending column of the tile
* @param desired_tile_height height of the tile
* @param total_number_of_rows total number of rows in the table
* @param stream stream to use
* @return number of tiles created
*/
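// Illustrative example (not part of the original code), continuing the 12-tile case above: the
// per-batch tile counts {8, 4} are exclusive-scanned into tile_starts {0, 8}, so tile 9 is
// mapped by upper_bound to batch 1 with local tile index 1 and covers rows 1128 through 1255 of
// a 1500-row table.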
size_type
build_tiles(device_span<tile_info> tiles,
device_uvector<size_type> const &batch_row_boundaries, // comes from build_batches
int column_start, int column_end, int desired_tile_height, int total_number_of_rows,
rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
size_type const total_tiles =
thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
device_uvector<size_type> tile_starts(num_batches + 1, stream);
auto tile_iter = cudf::detail::make_counting_transform_iterator(
0, [num_tiles = num_tiles.data(), num_batches] __device__(auto i) {
return (i < num_batches) ? num_tiles[i] : 0;
});
thrust::exclusive_scan(rmm::exec_policy(stream), tile_iter, tile_iter + num_batches + 1,
tile_starts.begin()); // in tiles
thrust::transform(
rmm::exec_policy(stream), iter, iter + total_tiles, tiles.begin(),
[=, tile_starts = tile_starts.data(),
batch_row_boundaries = batch_row_boundaries.data()] __device__(size_type tile_index) {
// what batch this tile falls in
auto const batch_index_iter =
thrust::upper_bound(thrust::seq, tile_starts, tile_starts + num_batches, tile_index);
auto const batch_index = std::distance(tile_starts, batch_index_iter) - 1;
// local index within the tile
int const local_tile_index = tile_index - tile_starts[batch_index];
// the start row for this batch.
int const batch_row_start = batch_row_boundaries[batch_index];
// the start row for this tile
int const tile_row_start = batch_row_start + (local_tile_index * desired_tile_height);
// the end row for this tile
int const max_row =
::min(total_number_of_rows - 1,
batch_index + 1 > num_batches ?
std::numeric_limits<size_type>::max() :
static_cast<int>(batch_row_boundaries[batch_index + 1]) - 1);
int const tile_row_end =
::min(batch_row_start + ((local_tile_index + 1) * desired_tile_height) - 1, max_row);
// stuff the tile
return tile_info{column_start, tile_row_start, column_end, tile_row_end,
static_cast<int>(batch_index)};
});
return total_tiles;
}
/**
* @brief Determines what data should be operated on by each tile for the incoming table.
*
* @tparam TileCallback Callback that receives the start and end columns of tiles
* @param column_sizes vector of the size of each column
* @param column_starts vector of the offset of each column
* @param first_row_batch_size size of the first row batch to limit max tile size since a tile
* is unable to span batches
* @param total_number_of_rows total number of rows in the table
* @param shmem_limit_per_tile shared memory allowed per tile
* @param f callback function called when building a tile
*/
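// Illustrative example (not part of the original code): with a hypothetical 24KB shared memory
// limit and a first column of 4-byte elements, optimal_square_len = sqrt(24576) ~= 156 and
// tile_height = clamp(round_up(156 / 4, 32), 1, first_row_batch_size) = 64 (assuming the first
// batch has at least 64 rows). Columns are then accumulated until the padded row size times 64
// would exceed the limit, at which point a vertical strip of tiles of that width is emitted.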
template <typename TileCallback>
void determine_tiles(std::vector<size_type> const &column_sizes,
std::vector<size_type> const &column_starts,
size_type const first_row_batch_size, size_type const total_number_of_rows,
size_type const &shmem_limit_per_tile, TileCallback f) {
// tile infos are organized with the tile going "down" the columns
// this provides the most coalescing of memory access
int current_tile_width = 0;
int current_tile_start_col = 0;
// the ideal tile height has lots of 8-byte reads and 8-byte writes. The optimal read/write
// would be memory cache line sized access, but since other tiles will read/write the edges
// this may not turn out to be overly important. For now, we will attempt to build a square
  // tile as far as byte sizes. x * y = shared_mem_size, which translates to x^2 =
  // shared_mem_size since we want them equal, so height and width are sqrt(shared_mem_size). The
// trick is that it's in bytes, not rows or columns.
auto const optimal_square_len = static_cast<size_type>(sqrt(shmem_limit_per_tile));
auto const tile_height =
std::clamp(util::round_up_safe<int>(
::min(optimal_square_len / column_sizes[0], total_number_of_rows), 32),
1, first_row_batch_size);
int row_size = 0;
// march each column and build the tiles of appropriate sizes
for (uint col = 0; col < column_sizes.size(); ++col) {
auto const col_size = column_sizes[col];
// align size for this type
auto const alignment_needed = col_size; // They are the same for fixed width types
auto const row_size_aligned = util::round_up_unsafe(row_size, alignment_needed);
auto const row_size_with_this_col = row_size_aligned + col_size;
auto const row_size_with_end_pad =
util::round_up_unsafe(row_size_with_this_col, JCUDF_ROW_ALIGNMENT);
if (row_size_with_end_pad * tile_height > shmem_limit_per_tile) {
// too large, close this tile, generate vertical tiles and restart
f(current_tile_start_col, col == 0 ? col : col - 1, tile_height);
row_size =
util::round_up_unsafe((column_starts[col] + column_sizes[col]) & 7, alignment_needed);
row_size += col_size; // alignment required for shared memory tile boundary to match
// alignment of output row
current_tile_start_col = col;
current_tile_width = 0;
} else {
row_size = row_size_with_this_col;
current_tile_width++;
}
}
// build last set of tiles
if (current_tile_width > 0) {
f(current_tile_start_col, static_cast<int>(column_sizes.size()) - 1, tile_height);
}
}
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
} // namespace detail
std::vector<std::unique_ptr<column>> convert_to_rows(table_view const &tbl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
auto const num_columns = tbl.num_columns();
auto const num_rows = tbl.num_rows();
auto const fixed_width_only = std::all_of(
tbl.begin(), tbl.end(), [](column_view const &c) { return is_fixed_width(c.type()); });
int device_id;
CUDA_TRY(hipGetDevice(&device_id));
int total_shmem_in_bytes;
CUDA_TRY(
hipDeviceGetAttribute(&total_shmem_in_bytes, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id));
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
sizeof(cuda::barrier<cuda::thread_scope_block>) * NUM_TILES_PER_KERNEL_LOADED;
auto const shmem_limit_per_tile = total_shmem_in_bytes / NUM_TILES_PER_KERNEL_LOADED;
// break up the work into tiles, which are a starting and ending row/col #.
// this tile size is calculated based on the shared memory size available
// we want a single tile to fill up the entire shared memory space available
// for the transpose-like conversion.
  // There are two different processes going on here: the GPU conversion of the data,
  // and the writing of the data into the list of byte columns that are a maximum of
  // 2 gigs each due to the offset maximum size. The GPU conversion portion has to understand
// this limitation because the column must own the data inside and as a result it must be
// a distinct allocation for that column. Copying the data into these final buffers would
// be prohibitively expensive, so care is taken to ensure the GPU writes to the proper buffer.
// The tiles are broken at the boundaries of specific rows based on the row sizes up
// to that point. These are row batches and they are decided first before building the
// tiles so the tiles can be properly cut around them.
// Get the pointers to the input columnar data ready
auto data_begin = thrust::make_transform_iterator(
tbl.begin(), [](auto const &c) { return c.template data<int8_t>(); });
std::vector<int8_t const *> input_data(data_begin, data_begin + tbl.num_columns());
auto nm_begin =
thrust::make_transform_iterator(tbl.begin(), [](auto const &c) { return c.null_mask(); });
std::vector<bitmask_type const *> input_nm(nm_begin, nm_begin + tbl.num_columns());
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
std::vector<size_type> column_sizes; // byte size of each column
std::vector<size_type> column_starts; // offset of column inside a row including alignment
column_sizes.reserve(num_columns);
column_starts.reserve(num_columns + 1); // we add a final offset for validity data start
auto schema_column_iter =
thrust::make_transform_iterator(thrust::make_counting_iterator(0),
[&tbl](auto i) -> std::tuple<data_type, column_view const> {
return {tbl.column(i).type(), tbl.column(i)};
});
auto const fixed_width_size_per_row = detail::compute_column_information(
schema_column_iter, schema_column_iter + num_columns, column_starts, column_sizes);
auto dev_col_sizes = make_device_uvector_async(column_sizes, stream, mr);
auto dev_col_starts = make_device_uvector_async(column_starts, stream, mr);
// total encoded row size. This includes fixed-width data, validity, and variable-width data.
auto row_size_iter = thrust::make_constant_iterator<uint64_t>(fixed_width_size_per_row);
auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
// the first batch always exists unless we were sent an empty table
auto const first_batch_size = batch_info.row_batches[0].row_count;
std::vector<rmm::device_buffer> output_buffers;
std::vector<int8_t *> output_data;
output_data.reserve(batch_info.row_batches.size());
output_buffers.reserve(batch_info.row_batches.size());
std::transform(batch_info.row_batches.begin(), batch_info.row_batches.end(),
std::back_inserter(output_buffers), [&](auto const &batch) {
return rmm::device_buffer(batch.num_bytes, stream, mr);
});
std::transform(output_buffers.begin(), output_buffers.end(), std::back_inserter(output_data),
[](auto &buf) { return static_cast<int8_t *>(buf.data()); });
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
int info_count = 0;
detail::determine_tiles(
column_sizes, column_starts, first_batch_size, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &info_count,
&stream](int const start_col, int const end_col, int const tile_height) {
int i = detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
info_count += i;
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_sizes, column_starts, first_batch_size, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &gpu_tile_infos, num_rows,
&tile_offset, stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
// blast through the entire table and convert it
dim3 blocks(util::div_rounding_up_unsafe(gpu_tile_infos.size(), NUM_TILES_PER_KERNEL_TO_ROWS));
dim3 threads(256);
auto validity_tile_infos = detail::build_validity_tile_infos(
num_columns, num_rows, shmem_limit_per_tile, batch_info.row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
dim3 validity_blocks(
util::div_rounding_up_unsafe(validity_tile_infos.size(), NUM_VALIDITY_TILES_PER_KERNEL));
dim3 validity_threads(::min(validity_tile_infos.size() * 32, 128lu));
detail::row_offset_functor offset_functor(fixed_width_size_per_row);
hipLaunchKernelGGL(( detail::copy_to_rows), dim3(blocks), dim3(threads), total_shmem_in_bytes, stream.value(),
num_rows, num_columns, shmem_limit_per_tile, gpu_tile_infos, dev_input_data.data(),
dev_col_sizes.data(), dev_col_starts.data(), offset_functor,
batch_info.d_batch_row_boundaries.data(),
reinterpret_cast<int8_t **>(dev_output_data.data()));
hipLaunchKernelGGL(( detail::copy_validity_to_rows), dim3(validity_blocks), dim3(validity_threads), total_shmem_in_bytes,
stream.value(),
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
batch_info.d_batch_row_boundaries.data(), dev_output_data.data(), column_starts.back(),
dev_validity_tile_infos, dev_input_nm.data());
// split up the output buffer into multiple buffers based on row batch sizes
// and create list of byte columns
std::vector<std::unique_ptr<column>> ret;
auto counting_iter = thrust::make_counting_iterator(0);
std::transform(counting_iter, counting_iter + batch_info.row_batches.size(),
std::back_inserter(ret), [&](auto batch) {
auto const offset_count = batch_info.row_batches[batch].row_offsets.size();
auto offsets = std::make_unique<column>(
data_type{type_id::INT32}, (size_type)offset_count,
batch_info.row_batches[batch].row_offsets.release());
auto data = std::make_unique<column>(data_type{type_id::INT8},
batch_info.row_batches[batch].num_bytes,
std::move(output_buffers[batch]));
return make_lists_column(
batch_info.row_batches[batch].row_count, std::move(offsets), std::move(data),
0, rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr);
});
return ret;
#else
CUDF_FAIL("Column to row conversion optimization requires volta or later hardware.");
return {};
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
}
std::vector<std::unique_ptr<column>>
convert_to_rows_fixed_width_optimized(table_view const &tbl, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const num_columns = tbl.num_columns();
std::vector<data_type> schema;
schema.resize(num_columns);
std::transform(tbl.begin(), tbl.end(), schema.begin(),
[](auto i) -> data_type { return i.type(); });
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
int32_t const size_per_row =
detail::compute_fixed_width_layout(schema, column_start, column_size);
auto dev_column_start = make_device_uvector_async(column_start, stream, mr);
auto dev_column_size = make_device_uvector_async(column_size, stream, mr);
// Make the number of rows per batch a multiple of 32 so we don't have to worry about
// splitting validity at a specific row offset. This might change in the future.
auto const max_rows_per_batch =
util::round_down_safe(std::numeric_limits<size_type>::max() / size_per_row, 32);
auto const num_rows = tbl.num_rows();
// Get the pointers to the input columnar data ready
std::vector<const int8_t *> input_data;
std::vector<bitmask_type const *> input_nm;
for (size_type column_number = 0; column_number < num_columns; column_number++) {
column_view cv = tbl.column(column_number);
input_data.emplace_back(cv.data<int8_t>());
input_nm.emplace_back(cv.null_mask());
}
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
using ScalarType = scalar_type_t<size_type>;
auto zero = make_numeric_scalar(data_type(type_id::INT32), stream.value());
zero->set_valid_async(true, stream);
static_cast<ScalarType *>(zero.get())->set_value(0, stream);
auto step = make_numeric_scalar(data_type(type_id::INT32), stream.value());
step->set_valid_async(true, stream);
static_cast<ScalarType *>(step.get())->set_value(static_cast<size_type>(size_per_row), stream);
std::vector<std::unique_ptr<column>> ret;
for (size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) {
size_type row_count = num_rows - row_start;
row_count = row_count > max_rows_per_batch ? max_rows_per_batch : row_count;
ret.emplace_back(detail::fixed_width_convert_to_rows(
row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size,
dev_input_data, dev_input_nm, *zero, *step, stream, mr));
}
return ret;
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
std::unique_ptr<table> convert_from_rows(lists_column_view const &input,
std::vector<data_type> const &schema,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
auto const num_rows = input.parent().size();
int device_id;
CUDA_TRY(hipGetDevice(&device_id));
int total_shmem_in_bytes;
CUDA_TRY(
hipDeviceGetAttribute(&total_shmem_in_bytes, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id));
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
sizeof(cuda::barrier<cuda::thread_scope_block>) * NUM_TILES_PER_KERNEL_LOADED;
int shmem_limit_per_tile = total_shmem_in_bytes / NUM_TILES_PER_KERNEL_LOADED;
std::vector<size_type> column_starts;
std::vector<size_type> column_sizes;
auto iter = thrust::make_transform_iterator(thrust::make_counting_iterator(0), [&schema](auto i) {
return std::make_tuple(schema[i], nullptr);
});
auto const fixed_width_size_per_row =
detail::compute_column_information(iter, iter + num_columns, column_starts, column_sizes);
// Ideally we would check that the offsets are all the same, etc. but for now
// this is probably fine
CUDF_EXPECTS(fixed_width_size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_col_starts = make_device_uvector_async(column_starts, stream, mr);
auto dev_col_sizes = make_device_uvector_async(column_sizes, stream, mr);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
// build the row_batches from the passed in list column
std::vector<detail::row_batch> row_batches;
row_batches.push_back(
{detail::row_batch{child.size(), num_rows, device_uvector<size_type>(0, stream)}});
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
// only ever get a single batch when going from rows, so boundaries
// are 0, num_rows
constexpr auto num_batches = 2;
device_uvector<size_type> gpu_batch_row_boundaries(num_batches, stream);
thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_batches), gpu_batch_row_boundaries.begin(),
[num_rows] __device__(auto i) { return i == 0 ? 0 : num_rows; });
int info_count = 0;
detail::determine_tiles(column_sizes, column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &info_count,
&stream](int const start_col, int const end_col, int const tile_height) {
info_count += detail::compute_tile_counts(gpu_batch_row_boundaries,
tile_height, stream);
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_sizes, column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &gpu_tile_infos, num_rows, &tile_offset,
stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
dim3 blocks(util::div_rounding_up_unsafe(gpu_tile_infos.size(), NUM_TILES_PER_KERNEL_FROM_ROWS));
dim3 threads(::min(::min(256, shmem_limit_per_tile / 8), static_cast<int>(child.size())));
auto validity_tile_infos =
detail::build_validity_tile_infos(num_columns, num_rows, shmem_limit_per_tile, row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
dim3 validity_blocks(
util::div_rounding_up_unsafe(validity_tile_infos.size(), NUM_VALIDITY_TILES_PER_KERNEL));
dim3 validity_threads(::min(validity_tile_infos.size() * 32, 128lu));
detail::row_offset_functor offset_functor(fixed_width_size_per_row);
hipLaunchKernelGGL(( detail::copy_from_rows), dim3(blocks), dim3(threads), total_shmem_in_bytes, stream.value(),
num_rows, num_columns, shmem_limit_per_tile, offset_functor, gpu_batch_row_boundaries.data(),
dev_output_data.data(), dev_col_sizes.data(), dev_col_starts.data(), gpu_tile_infos,
child.data<int8_t>());
hipLaunchKernelGGL(( detail::copy_validity_from_rows), dim3(validity_blocks), dim3(validity_threads), total_shmem_in_bytes,
stream.value(),
num_rows, num_columns, shmem_limit_per_tile, offset_functor, gpu_batch_row_boundaries.data(),
dev_output_nm.data(), column_starts.back(), dev_validity_tile_infos, child.data<int8_t>());
return std::make_unique<table>(std::move(output_columns));
#else
CUDF_FAIL("Row to column conversion optimization requires volta or later hardware.");
return {};
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
}
std::unique_ptr<table> convert_from_rows_fixed_width_optimized(
lists_column_view const &input, std::vector<data_type> const &schema,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
auto const num_rows = input.parent().size();
auto const size_per_row = detail::compute_fixed_width_layout(schema, column_start, column_size);
// Ideally we would check that the offsets are all the same, etc. but for now
// this is probably fine
CUDF_EXPECTS(size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_column_start = make_device_uvector_async(column_start, stream);
auto dev_column_size = make_device_uvector_async(column_size, stream);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
hipLaunchKernelGGL(( detail::copy_from_rows_fixed_width_optimized), dim3(blocks), dim3(threads), shared_size, stream.value(),
num_rows, num_columns, size_per_row, dev_column_start.data(), dev_column_size.data(),
dev_output_data.data(), dev_output_nm.data(), child.data<int8_t>());
return std::make_unique<table>(std::move(output_columns));
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
} // namespace jni
} // namespace cudf
| 03ba59d44336caa1af3519552631d80fea70c46f.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cooperative_groups.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
#include "row_conversion.hpp"
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
#include <cuda/barrier>
#endif
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <tuple>
constexpr auto JCUDF_ROW_ALIGNMENT = 8;
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
constexpr auto NUM_TILES_PER_KERNEL_FROM_ROWS = 2;
constexpr auto NUM_TILES_PER_KERNEL_TO_ROWS = 2;
constexpr auto NUM_TILES_PER_KERNEL_LOADED = 2;
constexpr auto NUM_VALIDITY_TILES_PER_KERNEL = 8;
constexpr auto NUM_VALIDITY_TILES_PER_KERNEL_LOADED = 2;
constexpr auto MAX_BATCH_SIZE = std::numeric_limits<cudf::size_type>::max();
// needed to suppress warning about cuda::barrier
#pragma nv_diag_suppress static_var_with_dynamic_init
#endif
using namespace cudf;
using detail::make_device_uvector_async;
using rmm::device_uvector;
namespace cudf {
namespace jni {
namespace detail {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/************************************************************************
* This module converts data from row-major to column-major and from column-major
* to row-major. It is a transpose of the data of sorts, but there are a few
* complicating factors. They are spelled out below:
*
* Row Batches:
* The row data has to fit inside a
* cuDF column, which limits it to 2 gigs currently. The calling code attempts
* to keep the data size under 2 gigs, but due to padding this isn't always
* the case, so being able to break this up into multiple columns is necessary.
* Internally, this is referred to as the row batch, which is a group of rows
 * that will fit into this 2 gig space requirement. There is typically 1 of
 * these batches, but there can be 2.
*
* Async Memcpy:
* The CUDA blocks are using memcpy_async, which allows for the device to
* schedule memcpy operations and then wait on them to complete at a later
* time with a barrier. The recommendation is to double-buffer the work
* so that processing can occur while a copy operation is being completed.
* On Ampere or later hardware there is dedicated hardware to do this copy
* and on pre-Ampere it should generate the same code that a hand-rolled
* loop would generate, so performance should be the same or better than
* a hand-rolled kernel.
*
* Tile Info:
* Each CUDA block will work on NUM_TILES_PER_KERNEL_*_ROWS tile infos
* before exiting. It will have enough shared memory available to load
* NUM_TILES_PER_KERNEL_LOADED tiles at one time. The block will load
* as many tiles as it can fit into shared memory and then wait on the
* first tile to completely load before processing. Processing in this
* case means copying the data from shared memory back out to device
* memory via memcpy_async. This kernel is completely memory bound.
*
* Batch Data:
* This structure contains all the row batches and some book-keeping
* data necessary for the batches such as row numbers for the batches.
*
* Tiles:
* The tile info describes a tile of data to process. In a GPU with
* 48KB of shared memory each tile uses approximately 24KB of memory
* which equates to about 144 bytes in each direction. The tiles are
* kept as square as possible to attempt to coalesce memory operations.
* The taller a tile is the better coalescing of columns, but row
* coalescing suffers. The wider a tile is the better the row coalescing,
* but columns coalescing suffers. The code attempts to produce a square
* tile to balance the coalescing. It starts by figuring out the optimal
* byte length and then adding columns to the data until the tile is too
* large. Since rows are different width with different alignment
* requirements, this isn't typically exact. Once a width is found the
* tiles are generated vertically with that width and height and then
* the process repeats. This means all the tiles will be the same
* height, but will have different widths based on what columns they
* encompass. Tiles in a vertical row will all have the same dimensions.
*
* --------------------------------
* | 4 5.0f || True 8 3 1 |
* | 3 6.0f || False 3 1 1 |
* | 2 7.0f || True 7 4 1 |
* | 1 8.0f || False 2 5 1 |
* --------------------------------
* | 0 9.0f || True 6 7 1 |
* ...
************************************************************************/
/**
* @brief The CUDA blocks work on one or more tile_info structs of data.
* This structure defines the workspaces for the blocks.
*
*/
struct tile_info {
int start_col;
int start_row;
int end_col;
int end_row;
int batch_number;
__device__ inline size_type get_shared_row_size(size_type const *const col_offsets,
size_type const *const col_sizes) const {
return util::round_up_unsafe(col_offsets[end_col] + col_sizes[end_col] - col_offsets[start_col],
JCUDF_ROW_ALIGNMENT);
}
__device__ inline size_type num_cols() const { return end_col - start_col + 1; }
__device__ inline size_type num_rows() const { return end_row - start_row + 1; }
};
/**
* @brief Returning rows is done in a byte cudf column. This is limited in size by
* `size_type` and so output is broken into batches of rows that fit inside
* this limit.
*
*/
struct row_batch {
size_type num_bytes; // number of bytes in this batch
size_type row_count; // number of rows in the batch
device_uvector<size_type> row_offsets; // offsets column of output cudf column
};
/**
* @brief Holds information about the batches of data to be processed
*
*/
struct batch_data {
device_uvector<size_type> batch_row_offsets; // offset column of returned cudf column
device_uvector<size_type> d_batch_row_boundaries; // row numbers for the start of each batch
std::vector<size_type>
batch_row_boundaries; // row numbers for the start of each batch: 0, 1500, 2700
std::vector<row_batch> row_batches; // information about each batch such as byte count
};
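// For fixed-width-only tables every row encodes to the same number of bytes, so a row's offset
// within its batch is simply (row_number - tile_row_start) * fixed_width_only_row_size.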
struct row_offset_functor {
row_offset_functor(size_type fixed_width_only_row_size)
: _fixed_width_only_row_size(fixed_width_only_row_size){};
__device__ inline size_type operator()(int row_number, int tile_row_start) const {
return (row_number - tile_row_start) * _fixed_width_only_row_size;
}
size_type _fixed_width_only_row_size;
};
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Copies data from row-based JCUDF format to column-based cudf format.
*
* This optimized version of the conversion is faster for fixed-width tables
* that do not have more than 100 columns.
*
* @param num_rows number of rows in the incoming table
* @param num_columns number of columns in the incoming table
* @param row_size length in bytes of each row
* @param input_offset_in_row offset to each row of data
* @param num_bytes total number of bytes in the incoming data
* @param output_data array of pointers to the output data
* @param output_nm array of pointers to the output null masks
* @param input_data pointing to the incoming row data
*/
__global__ void
copy_from_rows_fixed_width_optimized(const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *input_offset_in_row,
const size_type *num_bytes, int8_t **output_data,
bitmask_type **output_nm, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// For simplicity we will refer to this as a row_group
// In practice we have found writing more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type const rows_per_group = blockDim.x;
size_type const row_group_start = blockIdx.x;
size_type const row_group_stride = gridDim.x;
size_type const row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying from shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (auto row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Step 1: Copy the data into shared memory
// We know row_size is always aligned with and a multiple of int64_t;
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t const *long_input = reinterpret_cast<int64_t const *>(input_data);
auto const shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x);
auto const shared_output_stride = blockDim.x * blockDim.y;
auto const row_index_end = std::min(num_rows, ((row_group_index + 1) * rows_per_group));
auto const num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
auto const shared_length = row_size * num_rows_in_group;
size_type const shared_output_end = shared_length / sizeof(int64_t);
auto const start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_output_index; shared_index < shared_output_end;
shared_index += shared_output_stride) {
long_shared[shared_index] = long_input[start_input_index + shared_index];
}
// Wait for all of the data to be in shared memory
__syncthreads();
// Step 2 copy the data back out
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
auto const row_index = (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data in for the next row group.
uint32_t active_mask = __ballot_sync(0xffffffff, row_index < num_rows);
if (row_index < num_rows) {
auto const col_index_start = threadIdx.y;
auto const col_index_stride = blockDim.y;
for (auto col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
auto const col_size = num_bytes[col_index];
int8_t const *col_tmp = &(row_tmp[input_offset_in_row[col_index]]);
int8_t *col_output = output_data[col_index];
switch (col_size) {
case 1: {
col_output[row_index] = *col_tmp;
break;
}
case 2: {
int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output);
short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp);
break;
}
case 4: {
int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output);
int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp);
break;
}
case 8: {
int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output);
long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp);
break;
}
default: {
auto const output_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (auto b = 0; b < col_size; b++) {
col_output[b + output_offset] = col_tmp[b];
}
break;
}
}
bitmask_type *nm = output_nm[col_index];
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
int predicate = *valid_byte & (1 << byte_bit_offset);
uint32_t bitmask = __ballot_sync(active_mask, predicate);
if (row_index % 32 == 0) {
nm[word_index(row_index)] = bitmask;
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied before starting on the next row group
__syncthreads();
}
}
__global__ void copy_to_rows_fixed_width_optimized(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *output_offset_in_row, const size_type *num_bytes,
const int8_t **input_data, const bitmask_type **input_nm, int8_t *output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// We do not support copying a subset of the columns in a row yet, so we don't
// currently support a row that is wider than shared memory.
// For simplicity we will refer to this as a row_group
// In practice we have found reading more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type rows_per_group = blockDim.x;
size_type row_group_start = blockIdx.x;
size_type row_group_stride = gridDim.x;
size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying to shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp =
&row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (size_type row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data back out.
if (row_index < (start_row + num_rows)) {
size_type col_index_start = threadIdx.y;
size_type col_index_stride = blockDim.y;
for (size_type col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
size_type col_size = num_bytes[col_index];
int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]);
const int8_t *col_input = input_data[col_index];
switch (col_size) {
case 1: {
*col_tmp = col_input[row_index];
break;
}
case 2: {
const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input);
*reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index];
break;
}
case 4: {
const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input);
*reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index];
break;
}
case 8: {
const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input);
*reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index];
break;
}
default: {
size_type input_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (size_type b = 0; b < col_size; b++) {
col_tmp[b] = col_input[b + input_offset];
}
break;
}
}
// atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned
// so we have to rewrite the addresses to make sure that it is 4 byte aligned
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4;
int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes);
size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8);
// Now copy validity for the column
if (input_nm[col_index]) {
if (bit_is_set(input_nm[col_index], row_index)) {
atomicOr_block(valid_int, 1 << int_bit_offset);
} else {
atomicAnd_block(valid_int, ~(1 << int_bit_offset));
}
} else {
// It is valid so just set the bit
atomicOr_block(valid_int, 1 << int_bit_offset);
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied into shared memory
__syncthreads();
// Step 2: Copy the data back out
// We know row_size is always aligned with and a multiple of int64_t;
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t *long_output = reinterpret_cast<int64_t *>(output_data);
size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x);
size_type shared_input_stride = blockDim.x * blockDim.y;
size_type row_index_end = ((row_group_index + 1) * rows_per_group);
if (row_index_end > num_rows) {
row_index_end = num_rows;
}
size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
size_type shared_length = row_size * num_rows_in_group;
size_type shared_input_end = shared_length / sizeof(int64_t);
size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
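// Copy-out sketch (illustrative sizes): with a 32x4 thread block and a 24-byte row_size,
// a full row group of 32 rows is 32 * 24 / 8 == 96 int64_t words, which the 128 threads
// of the block write out in strides of blockDim.x * blockDim.y.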
for (size_type shared_index = shared_input_index; shared_index < shared_input_end;
shared_index += shared_input_stride) {
long_output[start_output_index + shared_index] = long_shared[shared_index];
}
__syncthreads();
// Go for the next round
}
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief copy data from cudf columns into JCUDF format, which is row-based
*
* @tparam RowOffsetIter iterator/functor that gives the byte offset of a specific row within its batch.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile shared memory amount each `tile_info` is using
* @param tile_infos span of `tile_info` structs that define the work
* @param input_data pointer to raw table data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data
*
*/
template <typename RowOffsetIter>
__global__ void copy_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile,
device_span<const tile_info> tile_infos, const int8_t **input_data,
const size_type *col_sizes, const size_type *col_offsets,
RowOffsetIter row_offsets, size_type const *batch_row_boundaries,
int8_t **output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// This has been broken up for us in the tile_info struct, so we don't have
// any calculation to do here, but it is important to note.
constexpr unsigned stages_count = NUM_TILES_PER_KERNEL_LOADED;
auto group = cooperative_groups::this_thread_block();
extern __shared__ int8_t shared_data[];
int8_t *shared[stages_count] = {shared_data, shared_data + shmem_used_per_tile};
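// Two staging buffers let the fetch of the next tile overlap with processing of the
// current one: shared[0] and shared[1] simply ping-pong based on the index modulo
// stages_count, with the barriers below guarding reuse of each buffer.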
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier[NUM_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_TILES_PER_KERNEL_LOADED; ++i) {
init(&tile_barrier[i], group.size());
}
}
group.sync();
auto const tiles_remaining =
std::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS,
static_cast<uint>(NUM_TILES_PER_KERNEL_TO_ROWS));
size_t fetch_index; //< tile we are currently fetching
size_t processing_index; //< tile we are currently processing
for (processing_index = fetch_index = 0; processing_index < tiles_remaining; ++processing_index) {
// Fetch ahead up to NUM_TILES_PER_KERNEL_LOADED
for (; fetch_index < tiles_remaining && fetch_index < (processing_index + stages_count);
++fetch_index) {
auto const fetch_tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS + fetch_index];
auto const num_fetch_cols = fetch_tile.num_cols();
auto const num_fetch_rows = fetch_tile.num_rows();
auto const num_elements_in_tile = num_fetch_cols * num_fetch_rows;
auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
auto const starting_column_offset = col_offsets[fetch_tile.start_col];
auto &fetch_barrier = tile_barrier[fetch_index % NUM_TILES_PER_KERNEL_LOADED];
// wait for the last use of the memory to be completed
if (fetch_index >= NUM_TILES_PER_KERNEL_LOADED) {
fetch_barrier.arrive_and_wait();
}
// To do the copy we could either copy whole columns and then rearrange them, or copy
// individual elements into their final row layout and then do row copies out. When going
// from column to row it is much easier to copy by elements first, otherwise we would need
// a running total of the column sizes for our tile, which isn't readily available. This
// makes it more appealing to copy element-wise from input data into shared memory matching
// the end layout and do row-based memcopies out.
auto const shared_buffer_base = shared[fetch_index % stages_count];
for (auto el = static_cast<int>(threadIdx.x); el < num_elements_in_tile; el += blockDim.x) {
auto const relative_col = el / num_fetch_rows;
auto const relative_row = el % num_fetch_rows;
auto const absolute_col = relative_col + fetch_tile.start_col;
auto const absolute_row = relative_row + fetch_tile.start_row;
auto const col_size = col_sizes[absolute_col];
auto const col_offset = col_offsets[absolute_col];
auto const relative_col_offset = col_offset - starting_column_offset;
auto const shared_offset = relative_row * fetch_tile_row_size + relative_col_offset;
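// Worked example (hypothetical tile of an INT32 column at offset 0 and an INT64 column
// at offset 8): if fetch_tile_row_size works out to 16 bytes, the INT64 element of
// relative_row 3 is staged at shared_offset 3 * 16 + 8 == 56.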
auto const input_src = input_data[absolute_col] + col_size * absolute_row;
// copy the element from global memory
switch (col_size) {
case 2:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<2>(col_size), fetch_barrier);
break;
case 4:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<4>(col_size), fetch_barrier);
break;
case 8:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src,
cuda::aligned_size_t<8>(col_size), fetch_barrier);
break;
default:
cuda::memcpy_async(&shared_buffer_base[shared_offset], input_src, col_size,
fetch_barrier);
break;
}
}
}
auto &processing_barrier = tile_barrier[processing_index % NUM_TILES_PER_KERNEL_LOADED];
processing_barrier.arrive_and_wait();
auto const tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_TO_ROWS + processing_index];
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
auto const column_offset = col_offsets[tile.start_col];
auto const tile_output_buffer = output_data[tile.batch_number];
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
// copy entire row 8 bytes at a time
constexpr auto bytes_per_chunk = 8;
auto const chunks_per_row = util::div_rounding_up_unsafe(tile_row_size, bytes_per_chunk);
auto const total_chunks = chunks_per_row * tile.num_rows();
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_row = i / chunks_per_row;
auto const relative_chunk_offset = (i % chunks_per_row) * bytes_per_chunk;
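// Worked example (illustrative numbers): with a 40-byte tile row, chunks_per_row == 5,
// so chunk i == 12 belongs to relative_row 2 and starts at byte offset (12 % 5) * 8 == 16
// within that row.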
auto const output_dest = tile_output_buffer +
row_offsets(relative_row + tile.start_row, row_batch_start) +
column_offset + relative_chunk_offset;
auto const input_src = &shared[processing_index % stages_count]
[tile_row_size * relative_row + relative_chunk_offset];
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
// wait on the last copies to complete
for (uint i = 0; i < std::min(stages_count, tiles_remaining); ++i) {
tile_barrier[i].arrive_and_wait();
}
}
/**
* @brief copy validity data from cudf columns into the JCUDF row format
*
* @tparam RowOffsetIter iterator/functor that gives the byte offset of a specific row within its batch.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data, partitioned by data size
* @param validity_offset offset into the output row at which validity data starts
* @param tile_infos information about the tiles of work
* @param input_nm pointers to the null masks of the input columns
*
*/
template <typename RowOffsetIter>
__global__ void
copy_validity_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const bitmask_type **input_nm) {
extern __shared__ int8_t shared_data[];
int8_t *shared_tiles[NUM_VALIDITY_TILES_PER_KERNEL_LOADED] = {
shared_data, shared_data + shmem_used_per_tile / 2};
using cudf::detail::warp_size;
// Each thread of the warp reads a single int32 of validity - so we read 128 bytes,
// then ballot_sync the bits and write the result to shared memory.
// After we fill shared memory we memcpy it out in a blob.
// We probably need knobs for number of rows vs columns to balance read/write.
auto group = cooperative_groups::this_thread_block();
int const tiles_remaining =
std::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL,
static_cast<uint>(NUM_VALIDITY_TILES_PER_KERNEL));
__shared__ cuda::barrier<cuda::thread_scope_block>
shared_tile_barriers[NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++i) {
init(&shared_tile_barriers[i], group.size());
}
}
group.sync();
for (int validity_tile = 0; validity_tile < tiles_remaining; ++validity_tile) {
if (validity_tile >= NUM_VALIDITY_TILES_PER_KERNEL_LOADED) {
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED].arrive_and_wait();
}
int8_t *this_shared_tile = shared_tiles[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto tile = tile_infos[blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL + validity_tile];
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
auto const num_sections_x = util::div_rounding_up_unsafe(num_tile_cols, 32);
auto const num_sections_y = util::div_rounding_up_unsafe(num_tile_rows, 32);
auto const validity_data_row_length = util::round_up_unsafe(
util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
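// Example (assuming JCUDF_ROW_ALIGNMENT is 8): a tile covering 100 columns needs
// ceil(100 / 8) == 13 validity bytes per row, which rounds up to a
// validity_data_row_length of 16 bytes.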
auto const total_sections = num_sections_x * num_sections_y;
int const warp_id = threadIdx.x / warp_size;
int const lane_id = threadIdx.x % warp_size;
auto const warps_per_tile = std::max(1u, blockDim.x / warp_size);
// the tile is divided into sections. A warp operates on a section at a time.
for (int my_section_idx = warp_id; my_section_idx < total_sections;
my_section_idx += warps_per_tile) {
// convert to rows and cols
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * 32 + lane_id;
auto const relative_row = section_y * 32;
auto const absolute_col = relative_col + tile.start_col;
auto const absolute_row = relative_row + tile.start_row;
auto const participation_mask = __ballot_sync(0xFFFFFFFF, absolute_col < num_columns);
if (absolute_col < num_columns) {
auto my_data = input_nm[absolute_col] != nullptr ?
input_nm[absolute_col][absolute_row / 32] :
std::numeric_limits<uint32_t>::max();
// every thread that is participating in the warp has 4 bytes, but it's column-based
// data and we need it in row-based. So we shuffle the bits around with ballot_sync to
// make the bytes we actually write.
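// Sketch of the transpose: on iteration i the warp ballots bit i of every lane's
// column-validity word, producing one 32-bit word whose bit k is the validity of
// relative column section_x * 32 + k for row relative_row + i.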
bitmask_type dw_mask = 1;
for (int i = 0; i < 32 && relative_row + i < num_rows; ++i, dw_mask <<= 1) {
auto validity_data = __ballot_sync(participation_mask, my_data & dw_mask);
// lead thread in each warp writes data
auto const validity_write_offset =
validity_data_row_length * (relative_row + i) + relative_col / CHAR_BIT;
if (threadIdx.x % warp_size == 0) {
*reinterpret_cast<int32_t *>(&this_shared_tile[validity_write_offset]) = validity_data;
}
}
}
}
// make sure entire tile has finished copy
group.sync();
auto const output_data_base =
output_data[tile.batch_number] + validity_offset + tile.start_col / CHAR_BIT;
// now async memcpy the shared memory out to the final destination 8 bytes at a time,
// in line with bytes_per_chunk below, since we work in 32-row chunks
constexpr auto bytes_per_chunk = 8;
auto const row_bytes = util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT);
auto const chunks_per_row = util::div_rounding_up_unsafe(row_bytes, bytes_per_chunk);
auto const total_chunks = chunks_per_row * tile.num_rows();
auto &processing_barrier =
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto const tail_bytes = row_bytes % bytes_per_chunk;
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_row = i / chunks_per_row;
auto const col_chunk = i % chunks_per_row;
auto const relative_chunk_offset = col_chunk * bytes_per_chunk;
auto const output_dest = output_data_base +
row_offsets(relative_row + tile.start_row, row_batch_start) +
relative_chunk_offset;
auto const input_src =
&this_shared_tile[validity_data_row_length * relative_row + relative_chunk_offset];
if (tail_bytes > 0 && col_chunk == chunks_per_row - 1)
cuda::memcpy_async(output_dest, input_src, tail_bytes, processing_barrier);
else
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
// wait for last tiles of data to arrive
for (int validity_tile = 0;
validity_tile < tiles_remaining % NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++validity_tile) {
shared_tile_barriers[validity_tile].arrive_and_wait();
}
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetIter iterator/functor that gives the byte offset of a specific row within its batch.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointers to column data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetIter>
__global__ void copy_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type *col_sizes, const size_type *col_offsets,
device_span<const tile_info> tile_infos, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// This has been broken up for us in the tile_info struct, so we don't have
// any calculation to do here, but it is important to note.
// to speed up some of the random access memory we do, we copy col_sizes and col_offsets
// to shared memory for each of the tiles that we work on
constexpr unsigned stages_count = NUM_TILES_PER_KERNEL_LOADED;
auto group = cooperative_groups::this_thread_block();
extern __shared__ int8_t shared_data[];
int8_t *shared[stages_count] = {shared_data, shared_data + shmem_used_per_tile};
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier[NUM_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_TILES_PER_KERNEL_LOADED; ++i) {
init(&tile_barrier[i], group.size());
}
}
group.sync();
auto tiles_remaining =
std::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS,
static_cast<uint>(NUM_TILES_PER_KERNEL_FROM_ROWS));
size_t fetch_index;
size_t processing_index;
for (processing_index = fetch_index = 0; processing_index < tiles_remaining; ++processing_index) {
// Fetch ahead up to stages_count groups
for (; fetch_index < static_cast<size_t>(tiles_remaining) &&
fetch_index < (processing_index + stages_count);
++fetch_index) {
auto const fetch_tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS + fetch_index];
auto const fetch_tile_start_row = fetch_tile.start_row;
auto const starting_col_offset = col_offsets[fetch_tile.start_col];
auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
auto &fetch_barrier = tile_barrier[fetch_index % NUM_TILES_PER_KERNEL_LOADED];
auto const row_batch_start =
fetch_tile.batch_number == 0 ? 0 : batch_row_boundaries[fetch_tile.batch_number];
// if we have fetched all buffers, we need to wait for processing
// to complete on them before we can use them again
if (fetch_index >= NUM_TILES_PER_KERNEL_LOADED) {
fetch_barrier.arrive_and_wait();
}
for (auto row = fetch_tile_start_row + static_cast<int>(threadIdx.x);
row <= fetch_tile.end_row; row += blockDim.x) {
auto shared_offset = (row - fetch_tile_start_row) * fetch_tile_row_size;
// copy the data
cuda::memcpy_async(&shared[fetch_index % stages_count][shared_offset],
&input_data[row_offsets(row, row_batch_start) + starting_col_offset],
fetch_tile_row_size, fetch_barrier);
}
}
auto &processing_barrier = tile_barrier[processing_index % NUM_TILES_PER_KERNEL_LOADED];
// ensure our data is ready
processing_barrier.arrive_and_wait();
auto const tile = tile_infos[blockIdx.x * NUM_TILES_PER_KERNEL_FROM_ROWS + processing_index];
auto const rows_in_tile = tile.num_rows();
auto const cols_in_tile = tile.num_cols();
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
// now we copy from shared memory to final destination.
// the data is laid out in rows in shared memory, so the reads
// for a column will be "vertical". Because of this and the different
// sizes for each column, this portion is handled on row/column basis.
// to prevent each thread working on a single row and also to ensure
// that all threads can do work in the case of more threads than rows,
// we do a global index instead of a double for loop with col/row.
for (int index = threadIdx.x; index < rows_in_tile * cols_in_tile; index += blockDim.x) {
auto const relative_col = index % cols_in_tile;
auto const relative_row = index / cols_in_tile;
auto const absolute_col = relative_col + tile.start_col;
auto const absolute_row = relative_row + tile.start_row;
auto const shared_memory_row_offset = tile_row_size * relative_row;
auto const shared_memory_offset =
col_offsets[absolute_col] - col_offsets[tile.start_col] + shared_memory_row_offset;
auto const column_size = col_sizes[absolute_col];
int8_t *shmem_src = &shared[processing_index % stages_count][shared_memory_offset];
int8_t *dst = &output_data[absolute_col][absolute_row * column_size];
cuda::memcpy_async(dst, shmem_src, column_size, processing_barrier);
}
group.sync();
}
// wait on the last copies to complete
for (uint i = 0; i < std::min(stages_count, tiles_remaining); ++i) {
tile_barrier[i].arrive_and_wait();
}
}
/**
* @brief copy validity data from JCUDF row format into cudf column null masks
*
* @tparam RowOffsetIter iterator/functor that gives the byte offset of a specific row within its batch.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_nm pointers to null masks for columns
* @param validity_offset offset into the input row at which validity data starts
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetIter>
__global__ void
copy_validity_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetIter row_offsets,
size_type const *batch_row_boundaries, bitmask_type **output_nm,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const int8_t *input_data) {
extern __shared__ int8_t shared_data[];
int8_t *shared_tiles[NUM_VALIDITY_TILES_PER_KERNEL_LOADED] = {
shared_data, shared_data + shmem_used_per_tile / 2};
using cudf::detail::warp_size;
// Each thread of the warp reads a single byte of validity - so we read 32 bytes,
// then ballot_sync the bits and write the result to shared memory.
// After we fill shared memory we memcpy it out in a blob.
// We probably need knobs for number of rows vs columns to balance read/write.
auto group = cooperative_groups::this_thread_block();
int const tiles_remaining =
std::min(static_cast<uint>(tile_infos.size()) - blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL,
static_cast<uint>(NUM_VALIDITY_TILES_PER_KERNEL));
__shared__ cuda::barrier<cuda::thread_scope_block>
shared_tile_barriers[NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
if (group.thread_rank() == 0) {
for (int i = 0; i < NUM_VALIDITY_TILES_PER_KERNEL_LOADED; ++i) {
init(&shared_tile_barriers[i], group.size());
}
}
group.sync();
for (int validity_tile = 0; validity_tile < tiles_remaining; ++validity_tile) {
if (validity_tile >= NUM_VALIDITY_TILES_PER_KERNEL_LOADED) {
auto const validity_index = validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED;
shared_tile_barriers[validity_index].arrive_and_wait();
}
int8_t *this_shared_tile = shared_tiles[validity_tile % 2];
auto const tile = tile_infos[blockIdx.x * NUM_VALIDITY_TILES_PER_KERNEL + validity_tile];
auto const tile_start_col = tile.start_col;
auto const tile_start_row = tile.start_row;
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
constexpr auto rows_per_read = 32;
auto const num_sections_x = util::div_rounding_up_safe(num_tile_cols, CHAR_BIT);
auto const num_sections_y = util::div_rounding_up_safe(num_tile_rows, rows_per_read);
auto const validity_data_col_length = num_sections_y * 4; // words to bytes
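// Each 32-row section contributes one 32-bit word per column, so a tile spanning 96 rows
// has num_sections_y == 3 and a validity_data_col_length of 12 bytes (illustrative numbers).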
auto const total_sections = num_sections_x * num_sections_y;
int const warp_id = threadIdx.x / warp_size;
int const lane_id = threadIdx.x % warp_size;
auto const warps_per_tile = std::max(1u, blockDim.x / warp_size);
// the tile is divided into sections. A warp operates on a section at a time.
for (int my_section_idx = warp_id; my_section_idx < total_sections;
my_section_idx += warps_per_tile) {
// convert section to row and col
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * CHAR_BIT;
auto const relative_row = section_y * rows_per_read + lane_id;
auto const absolute_col = relative_col + tile_start_col;
auto const absolute_row = relative_row + tile_start_row;
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
auto const participation_mask = __ballot_sync(0xFFFFFFFF, absolute_row < num_rows);
if (absolute_row < num_rows) {
auto const my_byte = input_data[row_offsets(absolute_row, row_batch_start) +
validity_offset + absolute_col / CHAR_BIT];
// so every thread that is participating in the warp has a byte, but it's row-based
// data and we need it in column-based. So we shuffle the bits around to make
// the bytes we actually write.
for (int i = 0, byte_mask = 1; i < CHAR_BIT && relative_col + i < num_columns;
++i, byte_mask <<= 1) {
auto validity_data = __ballot_sync(participation_mask, my_byte & byte_mask);
// lead thread in each warp writes data
if (threadIdx.x % warp_size == 0) {
auto const validity_write_offset =
validity_data_col_length * (relative_col + i) + relative_row / CHAR_BIT;
*reinterpret_cast<int32_t *>(&this_shared_tile[validity_write_offset]) = validity_data;
}
}
}
}
// make sure entire tile has finished copy
group.sync();
// now async memcpy the shared memory out to the final destination 8 bytes at a time
constexpr auto bytes_per_chunk = 8;
auto const col_bytes = util::div_rounding_up_unsafe(num_tile_rows, CHAR_BIT);
auto const chunks_per_col = util::div_rounding_up_unsafe(col_bytes, bytes_per_chunk);
auto const total_chunks = chunks_per_col * num_tile_cols;
auto &processing_barrier =
shared_tile_barriers[validity_tile % NUM_VALIDITY_TILES_PER_KERNEL_LOADED];
auto const tail_bytes = col_bytes % bytes_per_chunk;
for (auto i = threadIdx.x; i < total_chunks; i += blockDim.x) {
// determine source address of my chunk
auto const relative_col = i / chunks_per_col;
auto const row_chunk = i % chunks_per_col;
auto const absolute_col = relative_col + tile_start_col;
auto const relative_chunk_byte_offset = row_chunk * bytes_per_chunk;
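// output_nm is addressed in 32-bit bitmask words; each 8-byte chunk spans two words,
// hence the row_chunk * 2 word stride below from the tile's starting word.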
auto const output_dest = output_nm[absolute_col] + word_index(tile_start_row) + row_chunk * 2;
auto const input_src =
&this_shared_tile[validity_data_col_length * relative_col + relative_chunk_byte_offset];
if (tail_bytes > 0 && row_chunk == chunks_per_col - 1) {
cuda::memcpy_async(output_dest, input_src, tail_bytes, processing_barrier);
} else {
cuda::memcpy_async(output_dest, input_src,
cuda::aligned_size_t<bytes_per_chunk>(bytes_per_chunk),
processing_barrier);
}
}
}
// wait for last tiles of data to arrive
auto const num_tiles_to_wait = tiles_remaining > NUM_VALIDITY_TILES_PER_KERNEL_LOADED ?
NUM_VALIDITY_TILES_PER_KERNEL_LOADED :
tiles_remaining;
for (int validity_tile = 0; validity_tile < num_tiles_to_wait; ++validity_tile) {
shared_tile_barriers[validity_tile].arrive_and_wait();
}
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Calculate the dimensions of the kernel for fixed width only columns.
*
* @param [in] num_columns the number of columns being copied.
* @param [in] num_rows the number of rows being copied.
* @param [in] size_per_row the size each row takes up when padded.
* @param [out] blocks the size of the blocks for the kernel
* @param [out] threads the size of the threads for the kernel
* @return the size in bytes of shared memory needed for each block.
*/
static int calc_fixed_width_kernel_dims(const size_type num_columns, const size_type num_rows,
const size_type size_per_row, dim3 &blocks, dim3 &threads) {
// We have found speed degrades when a thread handles more than 4 columns.
// Each block is 2 dimensional. The y dimension indicates the columns.
// We limit this to 32 threads in the y dimension so we can still
// have at least 32 threads in the x dimension (1 warp) which should
// result in better coalescing of memory operations. We also
// want to guarantee that we are processing a multiple of 32 threads
// in the x dimension because we use atomic operations at the block
// level when writing validity data out to main memory, and that would
// need to change if we split a word of validity data between blocks.
int const y_block_size = min(util::div_rounding_up_safe(num_columns, 4), 32);
int const x_possible_block_size = 1024 / y_block_size;
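// Example: 10 columns gives y_block_size == min(ceil(10 / 4), 32) == 3 and
// x_possible_block_size == 1024 / 3 == 341 threads in the x dimension, before the
// shared-memory limit and the round-down to a multiple of 32 below are applied.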
// 48KB is the default setting for shared memory per block according to the cuda tutorials
// If someone configures the GPU to only have 16 KB this might not work.
int const max_shared_size = 48 * 1024;
// If we don't have enough shared memory there is no point in having more threads
// per block; they would just sit idle
auto const max_block_size = std::min(x_possible_block_size, max_shared_size / size_per_row);
// Make sure that the x dimension is a multiple of 32. This not only helps
// coalesce memory accesses, it also lets us do a ballot sync for validity when writing
// the data back out at the warp level. If x is a multiple of 32 then each thread in the y
// dimension is associated with one or more warps, and those should correspond to the validity
// words directly.
int const block_size = (max_block_size / 32) * 32;
CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory");
// The maximum number of blocks supported in the x dimension is 2^31 - 1,
// but in practice having too many can cause some overhead that I don't totally
// understand. Playing around with this, having as few as 600 blocks appears
// to be able to saturate memory on a V100, so this is an order of magnitude higher
// to try and future proof this a bit.
int const num_blocks = std::clamp((num_rows + block_size - 1) / block_size, 1, 10240);
blocks.x = num_blocks;
blocks.y = 1;
blocks.z = 1;
threads.x = block_size;
threads.y = y_block_size;
threads.z = 1;
return size_per_row * block_size;
}
/**
* When converting to rows it is possible that the size of the table was too big to fit
* in a single column. This creates an output column for a subset of the rows in the table,
* starting at start_row and containing the next num_rows rows. Most of the parameters passed
* into this function are common between runs and should be calculated once.
*/
static std::unique_ptr<column> fixed_width_convert_to_rows(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type size_per_row, rmm::device_uvector<size_type> &column_start,
rmm::device_uvector<size_type> &column_size, rmm::device_uvector<const int8_t *> &input_data,
rmm::device_uvector<const bitmask_type *> &input_nm, const scalar &zero,
const scalar &scalar_size_per_row, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
int64_t const total_allocation = size_per_row * num_rows;
// We made a mistake in the split somehow
CUDF_EXPECTS(total_allocation < std::numeric_limits<size_type>::max(),
"Table is too large to fit!");
// Allocate and set the offsets column for the output byte array
std::unique_ptr<column> offsets =
cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream);
std::unique_ptr<column> data =
make_numeric_column(data_type(type_id::INT8), static_cast<size_type>(total_allocation),
mask_state::UNALLOCATED, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
copy_to_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
start_row, num_rows, num_columns, size_per_row, column_start.data(), column_size.data(),
input_data.data(), input_nm.data(), data->mutable_view().data<int8_t>());
return make_lists_column(num_rows, std::move(offsets), std::move(data), 0,
rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr);
}
static inline bool are_all_fixed_width(std::vector<data_type> const &schema) {
return std::all_of(schema.begin(), schema.end(),
[](const data_type &t) { return is_fixed_width(t); });
}
/**
* @brief Given a set of fixed width columns, calculate how the data will be laid out in memory.
*
* @param [in] schema the types of columns that need to be laid out.
* @param [out] column_start the byte offset where each column starts in the row.
* @param [out] column_size the size in bytes of the data for each columns in the row.
* @return the size in bytes each row needs.
*/
static inline int32_t compute_fixed_width_layout(std::vector<data_type> const &schema,
std::vector<size_type> &column_start,
std::vector<size_type> &column_size) {
// We guarantee that the start of each column is 64-bit aligned so anything can go
// there, but to make the code simple we will still do an alignment for it.
int32_t at_offset = 0;
for (auto col = schema.begin(); col < schema.end(); col++) {
size_type s = size_of(*col);
column_size.emplace_back(s);
std::size_t allocation_needed = s;
std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types
at_offset = util::round_up_unsafe(at_offset, static_cast<int32_t>(alignment_needed));
column_start.emplace_back(at_offset);
at_offset += allocation_needed;
}
// Now we need to add in space for validity
// Eventually we can think about nullable vs not nullable, but for now we will just always add
// it in
int32_t const validity_bytes_needed =
util::div_rounding_up_safe<int32_t>(schema.size(), CHAR_BIT);
// validity comes at the end and is byte aligned so we can pack more in.
at_offset += validity_bytes_needed;
// Now we need to pad the end so all rows are 64 bit aligned
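// Example (with the 8-byte JCUDF_ROW_ALIGNMENT): columns INT32, INT8, INT64 get starts
// 0, 4 and 8, data ends at byte 16, one validity byte brings the row to 17 bytes, and
// padding rounds it up to 24 bytes.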
return util::round_up_unsafe(at_offset, JCUDF_ROW_ALIGNMENT);
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
/**
* @brief Compute information about a table such as bytes per row and offsets.
*
* @tparam iterator iterator of column schema data
* @param begin starting iterator of column schema
* @param end ending iterator of column schema
* @param column_starts column start offsets
* @param column_sizes size in bytes of each column
* @return size of the fixed_width data portion of a row.
*/
template <typename iterator>
static size_type compute_column_information(iterator begin, iterator end,
std::vector<size_type> &column_starts,
std::vector<size_type> &column_sizes) {
size_type fixed_width_size_per_row = 0;
for (auto cv = begin; cv != end; ++cv) {
auto col_type = std::get<0>(*cv);
bool nested_type = is_compound(col_type);
// a list or string column will write a single uint64
// of data here for offset/length
auto col_size = nested_type ? 8 : size_of(col_type);
// align size for this type
size_type const alignment_needed = col_size; // They are the same for fixed width types
fixed_width_size_per_row = util::round_up_unsafe(fixed_width_size_per_row, alignment_needed);
column_starts.push_back(fixed_width_size_per_row);
column_sizes.push_back(col_size);
fixed_width_size_per_row += col_size;
}
auto validity_offset = fixed_width_size_per_row;
column_starts.push_back(validity_offset);
return util::round_up_unsafe(
fixed_width_size_per_row +
util::div_rounding_up_safe(static_cast<size_type>(std::distance(begin, end)), CHAR_BIT),
JCUDF_ROW_ALIGNMENT);
}
/**
* @brief Build `tile_info` for the validity data to break up the work.
*
* @param num_columns number of columns in the table
* @param num_rows number of rows in the table
* @param shmem_limit_per_tile size of shared memory available to a single gpu tile
* @param row_batches batched row information for multiple output locations
* @return vector of `tile_info` structs for validity data
*/
std::vector<detail::tile_info>
build_validity_tile_infos(size_type const &num_columns, size_type const &num_rows,
size_type const &shmem_limit_per_tile,
std::vector<row_batch> const &row_batches) {
auto const desired_rows_and_columns = static_cast<int>(sqrt(shmem_limit_per_tile));
auto const column_stride = util::round_up_unsafe(
[&]() {
if (desired_rows_and_columns > num_columns) {
// not many columns, group it into 8s and ship it off
return std::min(CHAR_BIT, num_columns);
} else {
return util::round_down_safe(desired_rows_and_columns, CHAR_BIT);
}
}(),
JCUDF_ROW_ALIGNMENT);
// we fit as much as we can given the column stride
// note that an element in the table takes just 1 bit, but a row with a single
// element still takes 8 bytes!
auto const bytes_per_row = util::round_up_safe(
util::div_rounding_up_unsafe(column_stride, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
auto const row_stride =
std::min(num_rows, util::round_down_safe(shmem_limit_per_tile / bytes_per_row, 64));
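// Example (assuming a 48KB shmem_limit_per_tile): a 64-column stride needs 8 bytes per
// validity row, so row_stride == min(num_rows, round_down(49152 / 8, 64)) == min(num_rows, 6144).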
std::vector<detail::tile_info> validity_tile_infos;
validity_tile_infos.reserve(num_columns / column_stride * num_rows / row_stride);
for (int col = 0; col < num_columns; col += column_stride) {
int current_tile_row_batch = 0;
int rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
int row = 0;
while (row < num_rows) {
if (rows_left_in_batch == 0) {
current_tile_row_batch++;
rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
}
int const tile_height = std::min(row_stride, rows_left_in_batch);
validity_tile_infos.emplace_back(detail::tile_info{
col, row, std::min(col + column_stride - 1, num_columns - 1), row + tile_height - 1});
row += tile_height;
rows_left_in_batch -= tile_height;
}
}
return validity_tile_infos;
}
/**
* @brief functor that returns the size of a row, or 0 if the row index is greater than the
* number of rows in the table
*
* @tparam RowSize iterator that returns the size of a specific row
*/
template <typename RowSize> struct row_size_functor {
row_size_functor(size_type row_end, RowSize row_sizes, size_type last_row_end)
: _row_end(row_end), _row_sizes(row_sizes), _last_row_end(last_row_end) {}
__device__ inline uint64_t operator()(int i) const {
return i >= _row_end ? 0 : _row_sizes[i + _last_row_end];
}
size_type _row_end;
RowSize _row_sizes;
size_type _last_row_end;
};
/**
* @brief Builds batches of rows that will fit in the size limit of a column.
*
* @tparam RowSize iterator that gives the size of a specific row of the table.
* @param num_rows Total number of rows in the table
* @param row_sizes iterator that gives the size of a specific row of the table.
* @param all_fixed_width bool indicating all data in this table is fixed width
* @param stream stream to operate on for this work
* @param mr memory resource used to allocate any returned data
* @returns vector of size_type's that indicate row numbers for batch boundaries and a
* device_uvector of row offsets
*/
template <typename RowSize>
batch_data build_batches(size_type num_rows, RowSize row_sizes, bool all_fixed_width,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
auto const total_size = thrust::reduce(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows);
auto const num_batches = static_cast<int32_t>(
util::div_rounding_up_safe(total_size, static_cast<uint64_t>(MAX_BATCH_SIZE)));
auto const num_offsets = num_batches + 1;
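// Example: if the summed row sizes come to 5 GiB and MAX_BATCH_SIZE is roughly the 2 GiB
// offset limit, this yields num_batches == 3 and num_offsets == 4 boundary entries.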
std::vector<row_batch> row_batches;
std::vector<size_type> batch_row_boundaries;
device_uvector<size_type> batch_row_offsets(all_fixed_width ? 0 : num_rows, stream);
// at most max gpu memory / 2GB iterations.
batch_row_boundaries.reserve(num_offsets);
batch_row_boundaries.push_back(0);
size_type last_row_end = 0;
device_uvector<uint64_t> cumulative_row_sizes(num_rows, stream);
thrust::inclusive_scan(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows,
cumulative_row_sizes.begin());
while (static_cast<int>(batch_row_boundaries.size()) < num_offsets) {
// find the next MAX_BATCH_SIZE boundary
size_type const row_end =
((thrust::lower_bound(rmm::exec_policy(stream), cumulative_row_sizes.begin(),
cumulative_row_sizes.begin() + (num_rows - last_row_end),
MAX_BATCH_SIZE) -
cumulative_row_sizes.begin()) +
last_row_end);
// build offset list for each row in this batch
auto const num_rows_in_batch = row_end - last_row_end;
auto const num_entries = row_end - last_row_end + 1;
device_uvector<size_type> output_batch_row_offsets(num_entries, stream, mr);
auto row_size_iter_bounded = cudf::detail::make_counting_transform_iterator(
0, row_size_functor(row_end, row_sizes, last_row_end));
thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter_bounded,
row_size_iter_bounded + num_entries, output_batch_row_offsets.begin());
auto const batch_bytes = output_batch_row_offsets.element(num_rows_in_batch, stream);
// The output_batch_row_offsets vector is used as the offset column of the returned data. This
// needs to be individually allocated, but the kernel needs a contiguous array of offsets or
// more global lookups are necessary.
if (!all_fixed_width) {
cudaMemcpy(batch_row_offsets.data() + last_row_end, output_batch_row_offsets.data(),
num_rows_in_batch * sizeof(size_type), cudaMemcpyDeviceToDevice);
}
batch_row_boundaries.push_back(row_end);
row_batches.push_back({batch_bytes, num_rows_in_batch, std::move(output_batch_row_offsets)});
last_row_end = row_end;
}
return {std::move(batch_row_offsets), make_device_uvector_async(batch_row_boundaries, stream),
std::move(batch_row_boundaries), std::move(row_batches)};
}
/**
* @brief Computes the number of tiles necessary given a tile height and batch offsets
*
* @param batch_row_boundaries row boundaries for each batch
* @param desired_tile_height height of each tile in the table
* @param stream stream to use
* @return number of tiles necessary
*/
int compute_tile_counts(device_span<size_type const> const &batch_row_boundaries,
int desired_tile_height, rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
return thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
}
/**
* @brief Builds the `tile_info` structs for a given table.
*
* @param tiles span of tiles to populate
* @param batch_row_boundaries boundary to row batches
* @param column_start starting column of the tile
* @param column_end ending column of the tile
* @param desired_tile_height height of the tile
* @param total_number_of_rows total number of rows in the table
* @param stream stream to use
* @return number of tiles created
*/
size_type
build_tiles(device_span<tile_info> tiles,
device_uvector<size_type> const &batch_row_boundaries, // comes from build_batches
int column_start, int column_end, int desired_tile_height, int total_number_of_rows,
rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
size_type const total_tiles =
thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
device_uvector<size_type> tile_starts(num_batches + 1, stream);
auto tile_iter = cudf::detail::make_counting_transform_iterator(
0, [num_tiles = num_tiles.data(), num_batches] __device__(auto i) {
return (i < num_batches) ? num_tiles[i] : 0;
});
thrust::exclusive_scan(rmm::exec_policy(stream), tile_iter, tile_iter + num_batches + 1,
tile_starts.begin()); // in tiles
thrust::transform(
rmm::exec_policy(stream), iter, iter + total_tiles, tiles.begin(),
[=, tile_starts = tile_starts.data(),
batch_row_boundaries = batch_row_boundaries.data()] __device__(size_type tile_index) {
// what batch this tile falls in
auto const batch_index_iter =
thrust::upper_bound(thrust::seq, tile_starts, tile_starts + num_batches, tile_index);
auto const batch_index = std::distance(tile_starts, batch_index_iter) - 1;
// local index within the tile
int const local_tile_index = tile_index - tile_starts[batch_index];
// the start row for this batch.
int const batch_row_start = batch_row_boundaries[batch_index];
// the start row for this tile
int const tile_row_start = batch_row_start + (local_tile_index * desired_tile_height);
// the end row for this tile
int const max_row =
std::min(total_number_of_rows - 1,
batch_index + 1 > num_batches ?
std::numeric_limits<size_type>::max() :
static_cast<int>(batch_row_boundaries[batch_index + 1]) - 1);
int const tile_row_end =
std::min(batch_row_start + ((local_tile_index + 1) * desired_tile_height) - 1, max_row);
// stuff the tile
return tile_info{column_start, tile_row_start, column_end, tile_row_end,
static_cast<int>(batch_index)};
});
return total_tiles;
}
/**
* @brief Determines what data should be operated on by each tile for the incoming table.
*
* @tparam TileCallback Callback that receives the start and end columns of tiles
* @param column_sizes vector of the size of each column
* @param column_starts vector of the offset of each column
* @param first_row_batch_size size of the first row batch to limit max tile size since a tile
* is unable to span batches
* @param total_number_of_rows total number of rows in the table
* @param shmem_limit_per_tile shared memory allowed per tile
* @param f callback function called when building a tile
*/
template <typename TileCallback>
void determine_tiles(std::vector<size_type> const &column_sizes,
std::vector<size_type> const &column_starts,
size_type const first_row_batch_size, size_type const total_number_of_rows,
size_type const &shmem_limit_per_tile, TileCallback f) {
// tile infos are organized with the tile going "down" the columns
// this provides the most coalescing of memory access
int current_tile_width = 0;
int current_tile_start_col = 0;
// the ideal tile height has lots of 8-byte reads and 8-byte writes. The optimal read/write
// would be memory cache line sized access, but since other tiles will read/write the edges
// this may not turn out to be overly important. For now, we will attempt to build a square
// tile as far as byte sizes. x * y = shared_mem_size. Which translates to x^2 =
// shared_mem_size since we want them equal, so height and width are sqrt(shared_mem_size). The
// trick is that it's in bytes, not rows or columns.
auto const optimal_square_len = static_cast<size_type>(sqrt(shmem_limit_per_tile));
auto const tile_height =
std::clamp(util::round_up_safe<int>(
std::min(optimal_square_len / column_sizes[0], total_number_of_rows), 32),
1, first_row_batch_size);
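// Example (assuming ~48KB of shared memory per tile): optimal_square_len is about 221,
// so with a leading 4-byte column the raw height 221 / 4 == 55 rounds up to 64 rows
// before being clamped to the first batch's row count.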
int row_size = 0;
// march each column and build the tiles of appropriate sizes
for (uint col = 0; col < column_sizes.size(); ++col) {
auto const col_size = column_sizes[col];
// align size for this type
auto const alignment_needed = col_size; // They are the same for fixed width types
auto const row_size_aligned = util::round_up_unsafe(row_size, alignment_needed);
auto const row_size_with_this_col = row_size_aligned + col_size;
auto const row_size_with_end_pad =
util::round_up_unsafe(row_size_with_this_col, JCUDF_ROW_ALIGNMENT);
if (row_size_with_end_pad * tile_height > shmem_limit_per_tile) {
// too large, close this tile, generate vertical tiles and restart
f(current_tile_start_col, col == 0 ? col : col - 1, tile_height);
row_size =
util::round_up_unsafe((column_starts[col] + column_sizes[col]) & 7, alignment_needed);
row_size += col_size; // alignment required for shared memory tile boundary to match
// alignment of output row
current_tile_start_col = col;
current_tile_width = 0;
} else {
row_size = row_size_with_this_col;
current_tile_width++;
}
}
// build last set of tiles
if (current_tile_width > 0) {
f(current_tile_start_col, static_cast<int>(column_sizes.size()) - 1, tile_height);
}
}
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
} // namespace detail
std::vector<std::unique_ptr<column>> convert_to_rows(table_view const &tbl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
auto const num_columns = tbl.num_columns();
auto const num_rows = tbl.num_rows();
auto const fixed_width_only = std::all_of(
tbl.begin(), tbl.end(), [](column_view const &c) { return is_fixed_width(c.type()); });
int device_id;
CUDA_TRY(cudaGetDevice(&device_id));
int total_shmem_in_bytes;
CUDA_TRY(
cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
sizeof(cuda::barrier<cuda::thread_scope_block>) * NUM_TILES_PER_KERNEL_LOADED;
auto const shmem_limit_per_tile = total_shmem_in_bytes / NUM_TILES_PER_KERNEL_LOADED;
// break up the work into tiles, which are a starting and ending row/col #.
// this tile size is calculated based on the shared memory size available
// we want a single tile to fill up the entire shared memory space available
// for the transpose-like conversion.
// There are two different processes going on here. The GPU conversion of the data
// and the writing of the data into the list of byte columns that are a maximum of
// 2 gigs each due to offset maximum size. The GPU conversion portion has to understand
// this limitation because the column must own the data inside and as a result it must be
// a distinct allocation for that column. Copying the data into these final buffers would
// be prohibitively expensive, so care is taken to ensure the GPU writes to the proper buffer.
// The tiles are broken at the boundaries of specific rows based on the row sizes up
// to that point. These are row batches and they are decided first before building the
// tiles so the tiles can be properly cut around them.
// Get the pointers to the input columnar data ready
auto data_begin = thrust::make_transform_iterator(
tbl.begin(), [](auto const &c) { return c.template data<int8_t>(); });
std::vector<int8_t const *> input_data(data_begin, data_begin + tbl.num_columns());
auto nm_begin =
thrust::make_transform_iterator(tbl.begin(), [](auto const &c) { return c.null_mask(); });
std::vector<bitmask_type const *> input_nm(nm_begin, nm_begin + tbl.num_columns());
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
std::vector<size_type> column_sizes; // byte size of each column
std::vector<size_type> column_starts; // offset of column inside a row including alignment
column_sizes.reserve(num_columns);
column_starts.reserve(num_columns + 1); // we add a final offset for validity data start
auto schema_column_iter =
thrust::make_transform_iterator(thrust::make_counting_iterator(0),
[&tbl](auto i) -> std::tuple<data_type, column_view const> {
return {tbl.column(i).type(), tbl.column(i)};
});
auto const fixed_width_size_per_row = detail::compute_column_information(
schema_column_iter, schema_column_iter + num_columns, column_starts, column_sizes);
auto dev_col_sizes = make_device_uvector_async(column_sizes, stream, mr);
auto dev_col_starts = make_device_uvector_async(column_starts, stream, mr);
// total encoded row size. This includes fixed-width data, validity, and variable-width data.
auto row_size_iter = thrust::make_constant_iterator<uint64_t>(fixed_width_size_per_row);
auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
// the first batch always exists unless we were sent an empty table
auto const first_batch_size = batch_info.row_batches[0].row_count;
std::vector<rmm::device_buffer> output_buffers;
std::vector<int8_t *> output_data;
output_data.reserve(batch_info.row_batches.size());
output_buffers.reserve(batch_info.row_batches.size());
std::transform(batch_info.row_batches.begin(), batch_info.row_batches.end(),
std::back_inserter(output_buffers), [&](auto const &batch) {
return rmm::device_buffer(batch.num_bytes, stream, mr);
});
std::transform(output_buffers.begin(), output_buffers.end(), std::back_inserter(output_data),
[](auto &buf) { return static_cast<int8_t *>(buf.data()); });
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
int info_count = 0;
detail::determine_tiles(
column_sizes, column_starts, first_batch_size, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &info_count,
&stream](int const start_col, int const end_col, int const tile_height) {
int i = detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
info_count += i;
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_sizes, column_starts, first_batch_size, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &gpu_tile_infos, num_rows,
&tile_offset, stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
// blast through the entire table and convert it
dim3 blocks(util::div_rounding_up_unsafe(gpu_tile_infos.size(), NUM_TILES_PER_KERNEL_TO_ROWS));
dim3 threads(256);
auto validity_tile_infos = detail::build_validity_tile_infos(
num_columns, num_rows, shmem_limit_per_tile, batch_info.row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
dim3 validity_blocks(
util::div_rounding_up_unsafe(validity_tile_infos.size(), NUM_VALIDITY_TILES_PER_KERNEL));
dim3 validity_threads(std::min(validity_tile_infos.size() * 32, 128lu));
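// Every encoded row on this path has the same size, so the offset functor only needs
// fixed_width_size_per_row: a row's byte offset within its batch is its batch-relative
// row index times that size.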
detail::row_offset_functor offset_functor(fixed_width_size_per_row);
detail::copy_to_rows<<<blocks, threads, total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, gpu_tile_infos, dev_input_data.data(),
dev_col_sizes.data(), dev_col_starts.data(), offset_functor,
batch_info.d_batch_row_boundaries.data(),
reinterpret_cast<int8_t **>(dev_output_data.data()));
detail::copy_validity_to_rows<<<validity_blocks, validity_threads, total_shmem_in_bytes,
stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
batch_info.d_batch_row_boundaries.data(), dev_output_data.data(), column_starts.back(),
dev_validity_tile_infos, dev_input_nm.data());
// split up the output buffer into multiple buffers based on row batch sizes
// and create list of byte columns
std::vector<std::unique_ptr<column>> ret;
auto counting_iter = thrust::make_counting_iterator(0);
std::transform(counting_iter, counting_iter + batch_info.row_batches.size(),
std::back_inserter(ret), [&](auto batch) {
auto const offset_count = batch_info.row_batches[batch].row_offsets.size();
auto offsets = std::make_unique<column>(
data_type{type_id::INT32}, (size_type)offset_count,
batch_info.row_batches[batch].row_offsets.release());
auto data = std::make_unique<column>(data_type{type_id::INT8},
batch_info.row_batches[batch].num_bytes,
std::move(output_buffers[batch]));
return make_lists_column(
batch_info.row_batches[batch].row_count, std::move(offsets), std::move(data),
0, rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr);
});
return ret;
#else
CUDF_FAIL("Column to row conversion optimization requires volta or later hardware.");
return {};
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
}
std::vector<std::unique_ptr<column>>
convert_to_rows_fixed_width_optimized(table_view const &tbl, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const num_columns = tbl.num_columns();
std::vector<data_type> schema;
schema.resize(num_columns);
std::transform(tbl.begin(), tbl.end(), schema.begin(),
[](auto i) -> data_type { return i.type(); });
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
int32_t const size_per_row =
detail::compute_fixed_width_layout(schema, column_start, column_size);
auto dev_column_start = make_device_uvector_async(column_start, stream, mr);
auto dev_column_size = make_device_uvector_async(column_size, stream, mr);
// Make the number of rows per batch a multiple of 32 so we don't have to worry about
// splitting validity at a specific row offset. This might change in the future.
auto const max_rows_per_batch =
util::round_down_safe(std::numeric_limits<size_type>::max() / size_per_row, 32);
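// Example: a 24-byte row gives max_rows_per_batch == round_down((2^31 - 1) / 24, 32),
// roughly 89.5 million rows per batch (illustrative row size).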
auto const num_rows = tbl.num_rows();
// Get the pointers to the input columnar data ready
std::vector<const int8_t *> input_data;
std::vector<bitmask_type const *> input_nm;
for (size_type column_number = 0; column_number < num_columns; column_number++) {
column_view cv = tbl.column(column_number);
input_data.emplace_back(cv.data<int8_t>());
input_nm.emplace_back(cv.null_mask());
}
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
using ScalarType = scalar_type_t<size_type>;
auto zero = make_numeric_scalar(data_type(type_id::INT32), stream.value());
zero->set_valid_async(true, stream);
static_cast<ScalarType *>(zero.get())->set_value(0, stream);
auto step = make_numeric_scalar(data_type(type_id::INT32), stream.value());
step->set_valid_async(true, stream);
static_cast<ScalarType *>(step.get())->set_value(static_cast<size_type>(size_per_row), stream);
std::vector<std::unique_ptr<column>> ret;
for (size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) {
size_type row_count = num_rows - row_start;
row_count = row_count > max_rows_per_batch ? max_rows_per_batch : row_count;
ret.emplace_back(detail::fixed_width_convert_to_rows(
row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size,
dev_input_data, dev_input_nm, *zero, *step, stream, mr));
}
return ret;
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
std::unique_ptr<table> convert_from_rows(lists_column_view const &input,
std::vector<data_type> const &schema,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
auto const num_rows = input.parent().size();
int device_id;
CUDA_TRY(cudaGetDevice(&device_id));
int total_shmem_in_bytes;
CUDA_TRY(
cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
sizeof(cuda::barrier<cuda::thread_scope_block>) * NUM_TILES_PER_KERNEL_LOADED;
int shmem_limit_per_tile = total_shmem_in_bytes / NUM_TILES_PER_KERNEL_LOADED;
std::vector<size_type> column_starts;
std::vector<size_type> column_sizes;
auto iter = thrust::make_transform_iterator(thrust::make_counting_iterator(0), [&schema](auto i) {
return std::make_tuple(schema[i], nullptr);
});
auto const fixed_width_size_per_row =
detail::compute_column_information(iter, iter + num_columns, column_starts, column_sizes);
// Ideally we would check that the offsets are all the same, etc. but for now
// this is probably fine
CUDF_EXPECTS(fixed_width_size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_col_starts = make_device_uvector_async(column_starts, stream, mr);
auto dev_col_sizes = make_device_uvector_async(column_sizes, stream, mr);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
// build the row_batches from the passed in list column
std::vector<detail::row_batch> row_batches;
row_batches.push_back(
{detail::row_batch{child.size(), num_rows, device_uvector<size_type>(0, stream)}});
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
// only ever get a single batch when going from rows, so boundaries
// are 0, num_rows
constexpr auto num_batches = 2;
device_uvector<size_type> gpu_batch_row_boundaries(num_batches, stream);
thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_batches), gpu_batch_row_boundaries.begin(),
[num_rows] __device__(auto i) { return i == 0 ? 0 : num_rows; });
int info_count = 0;
detail::determine_tiles(column_sizes, column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &info_count,
&stream](int const start_col, int const end_col, int const tile_height) {
info_count += detail::compute_tile_counts(gpu_batch_row_boundaries,
tile_height, stream);
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_sizes, column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &gpu_tile_infos, num_rows, &tile_offset,
stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
dim3 blocks(util::div_rounding_up_unsafe(gpu_tile_infos.size(), NUM_TILES_PER_KERNEL_FROM_ROWS));
dim3 threads(std::min(std::min(256, shmem_limit_per_tile / 8), static_cast<int>(child.size())));
auto validity_tile_infos =
detail::build_validity_tile_infos(num_columns, num_rows, shmem_limit_per_tile, row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
dim3 validity_blocks(
util::div_rounding_up_unsafe(validity_tile_infos.size(), NUM_VALIDITY_TILES_PER_KERNEL));
dim3 validity_threads(std::min(validity_tile_infos.size() * 32, 128lu));
detail::row_offset_functor offset_functor(fixed_width_size_per_row);
detail::copy_from_rows<<<blocks, threads, total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor, gpu_batch_row_boundaries.data(),
dev_output_data.data(), dev_col_sizes.data(), dev_col_starts.data(), gpu_tile_infos,
child.data<int8_t>());
detail::copy_validity_from_rows<<<validity_blocks, validity_threads, total_shmem_in_bytes,
stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor, gpu_batch_row_boundaries.data(),
dev_output_nm.data(), column_starts.back(), dev_validity_tile_infos, child.data<int8_t>());
return std::make_unique<table>(std::move(output_columns));
#else
CUDF_FAIL("Row to column conversion optimization requires volta or later hardware.");
return {};
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
}
std::unique_ptr<table> convert_from_rows_fixed_width_optimized(
lists_column_view const &input, std::vector<data_type> const &schema,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
auto const num_rows = input.parent().size();
auto const size_per_row = detail::compute_fixed_width_layout(schema, column_start, column_size);
// Ideally we would check that the offsets are all the same, etc. but for now
// this is probably fine
CUDF_EXPECTS(size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_column_start = make_device_uvector_async(column_start, stream);
auto dev_column_size = make_device_uvector_async(column_size, stream);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
detail::copy_from_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
num_rows, num_columns, size_per_row, dev_column_start.data(), dev_column_size.data(),
dev_output_data.data(), dev_output_nm.data(), child.data<int8_t>());
return std::make_unique<table>(std::move(output_columns));
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
} // namespace jni
} // namespace cudf
|
5b141a8f93bb75dd468ef057821c5b72d04849f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS 256 // 2^8
#define BLOCKS 1024 // 2^10
#define NUM_VALS (THREADS*BLOCKS)
#define MAX_DEPTH 16
#define INSERTION_SORT 32
#define swap(A,B) { int temp = A; A = B; B = temp;}
typedef struct vars{
int l;
int r;
int leq;
} vars;
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
__device__ void selection_sort( int *data, int left, int right )
{
for( int i = left ; i <= right ; ++i ){
int min_val = data[i];
int min_idx = i;
// Find the smallest value in the range [left, right].
for( int j = i+1 ; j <= right ; ++j ){
int val_j = data[j];
if( val_j < min_val ){
min_idx = j;
min_val = val_j;
}
}
// Swap the values.
if( i != min_idx ){
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
__global__ void cdp_simple_quicksort(int *data, int left, int right, int depth ){
//If we're too deep or there are few elements left, we fall back to a simple selection sort...
if( depth >= MAX_DEPTH || right-left <= INSERTION_SORT ){
selection_sort( data, left, right );
return;
}
hipStream_t s,s1;
int *lptr = data+left;
int *rptr = data+right;
int pivot = data[(left+right)/2];
int lval;
int rval;
int nright, nleft;
// Do the partitioning.
while (lptr <= rptr){
// Find the next left- and right-hand values to swap
lval = *lptr;
rval = *rptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while (lval < pivot && lptr < data+right){
lptr++;
lval = *lptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while (rval > pivot && rptr > data+left){
rptr--;
rval = *rptr;
}
// If the swap points are valid, swap
if (lptr <= rptr){
*lptr = rval;
*rptr = lval;
lptr++;
rptr--;
}
}
nright = rptr - data;
nleft = lptr - data;
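// Illustrative trace (example data only): for data = {3, 9, 4, 1, 7} with
// left = 0 and right = 4 the pivot is data[2] = 4. The loop above swaps 9 and
// 1, giving {3, 1, 4, 9, 7}; the pointers then meet at the pivot and cross, so
// nright = 1 and nleft = 3. The child launches below sort {3, 1} and {9, 7}
// independently, yielding {1, 3, 4, 7, 9}.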
// Launch a new block to sort the left part.
if (left < (rptr-data)){
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s , data, left, nright, depth+1);
hipStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right){
hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s1 , data, nleft, right, depth+1);
hipStreamDestroy(s1);
}
}
void gpu_qsort(int *data, int n){
int* gpuData;
int left = 0;
int right = n-1;
hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH);
hipMalloc((void**)&gpuData,n*sizeof(int));
hipMemcpy(gpuData,data, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1) , 0, 0, gpuData, left, right, 0);
hipDeviceSynchronize();
hipMemcpy(data,gpuData, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpuData);
hipDeviceReset();
}
int main(int argc, char const *argv[])
{
clock_t start, stop;
int *values = (int*)malloc(NUM_VALS * sizeof(int));
FILE *f = fopen("reverse_dataset.txt", "r");
for(int i=0;i< NUM_VALS; i++) {
fscanf(f, "%d\n", &values[i]);
}
printf("Hello\n");
start = clock();
gpu_qsort(values, NUM_VALS);
stop = clock();
print_elapsed(start, stop);
return 0;
}
| 5b141a8f93bb75dd468ef057821c5b72d04849f9.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS 256 // 2^8
#define BLOCKS 1024 // 2^10
#define NUM_VALS (THREADS*BLOCKS)
#define MAX_DEPTH 16
#define INSERTION_SORT 32
#define swap(A,B) { int temp = A; A = B; B = temp;}
typedef struct vars{
int l;
int r;
int leq;
} vars;
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
__device__ void selection_sort( int *data, int left, int right )
{
for( int i = left ; i <= right ; ++i ){
int min_val = data[i];
int min_idx = i;
// Find the smallest value in the range [left, right].
for( int j = i+1 ; j <= right ; ++j ){
int val_j = data[j];
if( val_j < min_val ){
min_idx = j;
min_val = val_j;
}
}
// Swap the values.
if( i != min_idx ){
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
__global__ void cdp_simple_quicksort(int *data, int left, int right, int depth ){
//If we're too deep or there are few elements left, we use an insertion sort...
if( depth >= MAX_DEPTH || right-left <= INSERTION_SORT ){
selection_sort( data, left, right );
return;
}
cudaStream_t s,s1;
int *lptr = data+left;
int *rptr = data+right;
int pivot = data[(left+right)/2];
int lval;
int rval;
int nright, nleft;
// Do the partitioning.
while (lptr <= rptr){
// Find the next left- and right-hand values to swap
lval = *lptr;
rval = *rptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while (lval < pivot && lptr < data+right){
lptr++;
lval = *lptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while (rval > pivot && rptr > data+left){
rptr--;
rval = *rptr;
}
// If the swap points are valid, swap
if (lptr <= rptr){
*lptr = rval;
*rptr = lval;
lptr++;
rptr--;
}
}
nright = rptr - data;
nleft = lptr - data;
// Launch a new block to sort the left part.
if (left < (rptr-data)){
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1);
cudaStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right){
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1);
cudaStreamDestroy(s1);
}
}
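// Build note: the device-side launches above use CUDA Dynamic Parallelism,
// which needs compute capability 3.5+ and relocatable device code, e.g. a
// command line along the lines of (file name is only a placeholder):
//   nvcc -arch=sm_35 -rdc=true quicksort.cu -o quicksort -lcudadevrt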
void gpu_qsort(int *data, int n){
int* gpuData;
int left = 0;
int right = n-1;
cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH);
cudaMalloc((void**)&gpuData,n*sizeof(int));
cudaMemcpy(gpuData,data, n*sizeof(int), cudaMemcpyHostToDevice);
cdp_simple_quicksort<<< 1, 1 >>>(gpuData, left, right, 0);
cudaDeviceSynchronize();
cudaMemcpy(data,gpuData, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpuData);
cudaDeviceReset();
}
int main(int argc, char const *argv[])
{
clock_t start, stop;
int *values = (int*)malloc(NUM_VALS * sizeof(int));
FILE *f = fopen("reverse_dataset.txt", "r");
for(int i=0;i< NUM_VALS; i++) {
fscanf(f, "%d\n", &values[i]);
}
printf("Hello\n");
start = clock();
gpu_qsort(values, NUM_VALS);
stop = clock();
print_elapsed(start, stop);
return 0;
}
|
d53e2541c9db55ddddde67928a9c94a8dc97114a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define RED 2
#define GREEN 1
#define BLUE 0
__device__
__host__
unsigned char clamp(int value){
if (value < 0) value = 0;
if (value > 255) value = 255;
return (unsigned char)value;
}
__host__
void print(unsigned char *M, int rows, int cols){
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
printf("%d ", M[(i * cols) + j]);
}
printf("\n");
}
}
__host__
void convolution(unsigned char *imageInput, int mask[3][3], int rows, int cols, unsigned char *imageOutput){
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
int sum = 0;
int aux_cols = j - 1, aux_rows = i - 1;
for(int k = 0; k < 3; k++) { // mask rows
for(int l = 0; l < 3; l++) { // mask columns
if ((aux_rows >= 0 && aux_cols >= 0) && (aux_rows < rows && aux_cols < cols))
sum += mask[k][l]*imageInput[(aux_rows*cols)+ aux_cols];
aux_cols++;
}
aux_rows++;
aux_cols = j - 1;
}
imageOutput[(i * cols) + j] = clamp(sum);
}
}
}
__global__
void convolutionCU(unsigned char *imageInput, int *mask, int rows, int cols, unsigned char *imageOutput){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
int sum = 0;
if (i < rows && j < cols) {
int aux_cols = j - 1, aux_rows = i - 1;
for (int k = 0; k < 3; k++) {// mask rows
for (int l = 0; l < 3; l++) {// mask columns
if(aux_rows >= 0 && aux_cols >= 0 && aux_rows < rows && aux_cols < cols)
sum += mask[(k*3) + l] * imageInput[(aux_rows*cols) + aux_cols];
aux_cols++;
}
aux_rows++;
aux_cols = j - 1;
}
imageOutput[(i * cols) + j] = clamp(sum);
}
}
__host__
void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
for(int row = 0; row < height; row++){
for(int col = 0; col < width; col++){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
}
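// Worked example (illustrative pixel values): for R = 100, G = 150, B = 200 the
// weighted sum is 100*0.299 + 150*0.587 + 200*0.114 = 29.9 + 88.05 + 22.8
// = 140.75, which truncates to 140 when stored in the unsigned char output.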
__global__
void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
__host__
void Union(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
for(int i = 0; i < rows; i++){
for(int j = 0; j < cols; j++){
imageOutput[(i * cols) + j] = sqrt(pow(Gx[(i * cols) + j],2) + pow(Gy[(i * cols) + j],2));
}
}
}
__global__
void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
if (i < rows && j < cols){
imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
}
}
int main(int argc, char **argv){
hipError_t error = hipSuccess;
unsigned char *h_imageInput, *d_imageInput, *h_imageGray, *d_imageGray;
unsigned char *d_Gx, *d_Gy, *h_G, *d_G; // Sobel operation
int *d_XMask, *d_YMask;
char* imageName = argv[1];
Mat image;
clock_t start, end;
double time_used;
if (argc != 2) {
printf("Usage: Image path\n");
return 1;
}
image = imread(imageName, 1);
if (!image.data) {
printf("No image Data\n");
return 1;
}
//---------> Grayscale
Size s = image.size();
int width = s.width;
int height = s.height;
int sz = sizeof(unsigned char) * width * height * image.channels();
int size = sizeof(unsigned char) * width * height;
error = hipMalloc((void**)&d_imageInput,sz);
if (error != hipSuccess) {
printf("Error allocating memory for d_imageInput\n");
exit(-1);
}
h_imageInput = image.data;
start = clock();
error = hipMemcpy(d_imageInput, h_imageInput, sz, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Error copying data from h_imageInput to d_imageInput\n");
exit(-1);
}
end = clock();
time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
h_imageGray = (unsigned char*)malloc(size);
error = hipMalloc((void**)&d_imageGray, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_imageGray\n");
exit(-1);
}
start = clock();
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
hipLaunchKernelGGL(( img2grayCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageInput, width, height, d_imageGray);
hipDeviceSynchronize();
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//---------> Masks
error = hipMalloc((void**)&d_XMask, 3*3*sizeof(int));
if (error != hipSuccess) {
printf("Error reservando memoria para d_Mascara_X\n");
exit(-1);
}
error = hipMalloc((void**)&d_YMask, 3*3*sizeof(int));
if (error != hipSuccess) {
printf("Error reservando memoria para d_Mascara_Y\n");
exit(-1);
}
int h_XMask[3*3] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
int h_YMask[3*3] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
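// Laid out as 3x3 matrices (row major) these are the standard Sobel kernels:
//   Gx = [-1 0 1; -2 0 2; -1 0 1]   Gy = [-1 -2 -1; 0 0 0; 1 2 1]
// Gx responds to horizontal intensity changes (vertical edges) and Gy to
// vertical changes (horizontal edges).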
start = clock();
error = hipMemcpy(d_XMask, h_XMask, 3*3*sizeof(int), hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Error copying data from h_XMask to d_XMask\n");
exit(-1);
}
error = hipMemcpy(d_YMask, h_YMask, 3*3*sizeof(int), hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copying data from h_YMask to d_YMask\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//---------> Sobel
h_G = (unsigned char*)malloc(size);
error = hipMalloc((void**)&d_G, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_G\n");
exit(-1);
}
error = hipMalloc((void**)&d_Gx, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_Gx\n");
exit(-1);
}
error = hipMalloc((void**)&d_Gy, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_Gy\n");
exit(-1);
}
start = clock();
// Convolution for Gx
hipLaunchKernelGGL(( convolutionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, d_XMask, height, width, d_Gx);
hipDeviceSynchronize();
// Convolution for Gy
hipLaunchKernelGGL(( convolutionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, d_YMask, height, width, d_Gy);
hipDeviceSynchronize();
// Union of Gx and Gy results
hipLaunchKernelGGL(( UnionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_G, d_Gx, d_Gy, height, width);
hipDeviceSynchronize();
error = hipMemcpy(h_G, d_G, size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("Error copying data from d_G to h_G\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
printf ("%lf \n",time_used);
hipFree(d_imageInput);
free(h_imageGray);
hipFree(d_imageGray);
hipFree(d_XMask);
hipFree(d_YMask);
free(h_G);
hipFree(d_Gx);
hipFree(d_Gy);
hipFree(d_G);
return 0;
}
| d53e2541c9db55ddddde67928a9c94a8dc97114a.cu | #include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define RED 2
#define GREEN 1
#define BLUE 0
__device__
__host__
unsigned char clamp(int value){
if (value < 0) value = 0;
if (value > 255) value = 255;
return (unsigned char)value;
}
__host__
void print(unsigned char *M, int rows, int cols){
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
printf("%d ", M[(i * cols) + j]);
}
printf("\n");
}
}
__host__
void convolution(unsigned char *imageInput, int mask[3][3], int rows, int cols, unsigned char *imageOutput){
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
int sum = 0;
int aux_cols = j - 1, aux_rows = i - 1;
for(int k = 0; k < 3; k++) { // mask rows
for(int l = 0; l < 3; l++) { // mask columns
if ((aux_rows >= 0 && aux_cols >= 0) && (aux_rows < rows && aux_cols < cols))
sum += mask[k][l]*imageInput[(aux_rows*cols)+ aux_cols];
aux_cols++;
}
aux_rows++;
aux_cols = j - 1;
}
imageOutput[(i * cols) + j] = clamp(sum);
}
}
}
__global__
void convolutionCU(unsigned char *imageInput, int *mask, int rows, int cols, unsigned char *imageOutput){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
int sum = 0;
if (i < rows && j < cols) {
int aux_cols = j - 1, aux_rows = i - 1;
for (int k = 0; k < 3; k++) {// mask rows
for (int l = 0; l < 3; l++) {// mask columns
if(aux_rows >= 0 && aux_cols >= 0 && aux_rows < rows && aux_cols < cols)
sum += mask[(k*3) + l] * imageInput[(aux_rows*cols) + aux_cols];
aux_cols++;
}
aux_rows++;
aux_cols = j - 1;
}
imageOutput[(i * cols) + j] = clamp(sum);
}
}
__host__
void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
for(int row = 0; row < height; row++){
for(int col = 0; col < width; col++){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
}
__global__
void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
__host__
void Union(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
for(int i = 0; i < rows; i++){
for(int j = 0; j < cols; j++){
imageOutput[(i * cols) + j] = sqrt(pow(Gx[(i * cols) + j],2) + pow(Gy[(i * cols) + j],2));
}
}
}
__global__
void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
if (i < rows && j < cols){
imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
}
}
int main(int argc, char **argv){
cudaError_t error = cudaSuccess;
unsigned char *h_imageInput, *d_imageInput, *h_imageGray, *d_imageGray;
unsigned char *d_Gx, *d_Gy, *h_G, *d_G; // Sobel operation
int *d_XMask, *d_YMask;
char* imageName = argv[1];
Mat image;
clock_t start, end;
double time_used;
if (argc != 2) {
printf("Usage: Image path\n");
return 1;
}
image = imread(imageName, 1);
if (!image.data) {
printf("No image Data\n");
return 1;
}
//---------> Grayscale
Size s = image.size();
int width = s.width;
int height = s.height;
int sz = sizeof(unsigned char) * width * height * image.channels();
int size = sizeof(unsigned char) * width * height;
error = cudaMalloc((void**)&d_imageInput,sz);
if (error != cudaSuccess) {
printf("Error allocating memory for d_imageInput\n");
exit(-1);
}
h_imageInput = image.data;
start = clock();
error = cudaMemcpy(d_imageInput, h_imageInput, sz, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Error copying data from h_imageInput to d_imageInput\n");
exit(-1);
}
end = clock();
time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
h_imageGray = (unsigned char*)malloc(size);
error = cudaMalloc((void**)&d_imageGray, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_imageGray\n");
exit(-1);
}
start = clock();
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
img2grayCU<<<dimGrid,dimBlock>>>(d_imageInput, width, height, d_imageGray);
cudaDeviceSynchronize();
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//---------> Masks
error = cudaMalloc((void**)&d_XMask, 3*3*sizeof(int));
if (error != cudaSuccess) {
printf("Error reservando memoria para d_Mascara_X\n");
exit(-1);
}
error = cudaMalloc((void**)&d_YMask, 3*3*sizeof(int));
if (error != cudaSuccess) {
printf("Error reservando memoria para d_Mascara_Y\n");
exit(-1);
}
int h_XMask[3*3] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
int h_YMask[3*3] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
start = clock();
error = cudaMemcpy(d_XMask, h_XMask, 3*3*sizeof(int), cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Error copying data from h_XMask to d_XMask\n");
exit(-1);
}
error = cudaMemcpy(d_YMask, h_YMask, 3*3*sizeof(int), cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copying data from h_YMask to d_YMask\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//---------> Sobel
h_G = (unsigned char*)malloc(size);
error = cudaMalloc((void**)&d_G, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_G\n");
exit(-1);
}
error = cudaMalloc((void**)&d_Gx, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_Gx\n");
exit(-1);
}
error = cudaMalloc((void**)&d_Gy, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_Gy\n");
exit(-1);
}
start = clock();
// Convolution for Gx
convolutionCU<<<dimGrid,dimBlock>>>(d_imageGray, d_XMask, height, width, d_Gx);
cudaDeviceSynchronize();
// Convolution for Gy
convolutionCU<<<dimGrid,dimBlock>>>(d_imageGray, d_YMask, height, width, d_Gy);
cudaDeviceSynchronize();
// Union of Gx and Gy results
UnionCU<<<dimGrid,dimBlock>>>(d_G, d_Gx, d_Gy, height, width);
cudaDeviceSynchronize();
error = cudaMemcpy(h_G, d_G, size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("Error copying data from d_G to h_G\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
printf ("%lf \n",time_used);
cudaFree(d_imageInput);
free(h_imageGray);
cudaFree(d_imageGray);
cudaFree(d_XMask);
cudaFree(d_YMask);
free(h_G);
cudaFree(d_Gx);
cudaFree(d_Gy);
cudaFree(d_G);
return 0;
}
|
7aef5b62c8221d51fab774bb7edc3ded60edb264.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
template <class T>
__global__ void cuKernel(T *output, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
// Do Something Here
}
}
float * cuFunction(const float* input, unsigned int N)
{
float *output;
// Method #1 - Re-use Device Memory for Output
//
// Cast input to non const.
// Note: ContainerManageDevice must be set to false in input container.
// eg:
/*
output = const_cast<float*>(input);
*/
// Method #2 - Allocate New Memory for Output
//
// CudaMalloc new output memory
// Note: ContainerManageDevice must be set to true in input container.
//
// eg:
/*
hipMalloc((void **) &output, sizeof(float)*N);
*/
// Compute execution configuration
int blockSize = 64;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
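// Worked example (illustrative value of N): for N = 1000 and blockSize = 64,
// N/blockSize = 15 and N%blockSize = 40 != 0, so nBlocks = 16; the 16*64 = 1024
// launched threads cover all 1000 elements and the idx < N guard in cuKernel
// skips the surplus 24.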
// Call kernel
hipLaunchKernelGGL(( cuKernel) , dim3(nBlocks), dim3(blockSize) , 0, 0, output, N);
// Return pointer to the output
return output;
}
| 7aef5b62c8221d51fab774bb7edc3ded60edb264.cu | /*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <cuda.h>
#include <cutil.h>
template <class T>
__global__ void cuKernel(T *output, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
// Do Something Here
}
}
float * cuFunction(const float* input, unsigned int N)
{
float *output;
// Method #1 - Re-use Device Memory for Output
//
// Cast input to non const.
// Note: ContainerManageDevice must be set to false in input container.
// eg:
/*
output = const_cast<float*>(input);
*/
// Method #2 - Allocate New Memory for Output
//
// CudaMalloc new output memory
// Note: ContainerManageDevice must be set to true in input container.
//
// eg:
/*
cudaMalloc((void **) &output, sizeof(float)*N);
*/
// Compute execution configuration
int blockSize = 64;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Call kernel
cuKernel <<< nBlocks, blockSize >>> (output, N);
// Return pointer to the output
return output;
}
|
bc2d0214105ba7483a784f9f0a32921fdc2f11f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
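// Worked sizes for this configuration (derived from the constants above):
// __OlLen2 = __halo2 * __side0Len = 2*9 = 18, so __side2LenOl = 476 + 2*18 = 512
// and __blockSize = 512 threads per block. Each block thus produces 476
// interior columns and redundantly recomputes an 18-column halo on each side
// across the 9 time steps fused into kernel0_9.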
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| bc2d0214105ba7483a784f9f0a32921fdc2f11f5.cu | #include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
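/* Temporal blocking: each kernel0_N launch below covers __side0Len = N time steps in a
   single kernel, so the spatial tile is padded by __OlLen = __halo * __side0Len overlap
   cells per side; the branches on __c0Len % __side0LenMax handle the leftover time steps. */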
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
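/* Reference path: when scop is false, compute the same 9-point stencil on the host with
   OpenMP, ping-ponging between the two time planes of A. */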
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
87b5c6a2ec0d0443efd1288165c753b57e7ad933.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <vector>
#include <random>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <chrono>
#include <execution>
#include <hip/hip_runtime_api.h>
#include "ManagedAllocator.hpp"
#include "Operations.h"
#include "ReduceMaxIdx.h"
#include "ReduceMaxIdxOptimized.h"
#include "ReduceMaxIdxOptimizedShared.h"
#include "ReduceMaxIdxOptimizedBlocks.h"
#include "ReduceMaxIdxOptimizedWarp.h"
#include "ReduceMaxIdxOptimizedWarpShared.h"
#include "Process.h"
template<class T>
using ManagedVector = std::vector<T, ManagedAllocator<T>>;
template <typename T>
std::ostream& operator<< (std::ostream& out, const ManagedVector<T>& v) {
if ( !v.empty() ) {
out << '[';
std::copy (v.begin(), v.end(), std::ostream_iterator<T>(out, ", "));
out << "\b\b]";
}
return out;
}
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
//if (abort) exit(code);
}
}
void checkGpuMem()
{
size_t freeM, totalM, usedM;
size_t freeT, totalT;
hipMemGetInfo(&freeT,&totalT);
freeM = freeT / 1048576; /* keep the 64-bit byte count; casting to unsigned int truncates above 4 GiB */
totalM = totalT / 1048576;
usedM = totalM - freeM;
printf ( "mem free %ld MB\nmem total %ld MB\nmem used %ld MB\n",freeM,totalM,usedM);
}
int main()
{
checkGpuMem();
const int N = 256000;
ManagedVector<float> input(N);
ManagedVector<float> output(1024 / 32);
ManagedVector<int> outputIdx(1024 / 32);
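// 1024 / 32 = 32 slots, i.e. room for one partial result per warp of a 1024-thread block.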
default_random_engine e;
uniform_real_distribution<> dis(0, 1); // range 0 - 1
generate(begin(input), end(input), [&](){ return dis(e); });
hipMemPrefetchAsync(input.data(), input.size() * sizeof(float), 0, 0);
hipMemPrefetchAsync(output.data(), output.size() * sizeof(float), 0, 0);
hipMemPrefetchAsync(outputIdx.data(), outputIdx.size() * sizeof(int), 0, 0);
gpuErrchk(hipDeviceSynchronize());
int iterations = 1000;
cout << endl;
cout << "iterations count=" << iterations << endl;
cout << "array size=" << N << endl;
cout << endl;
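// Baseline: a single thread (1 block x 1 thread) scans the entire array.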
process(iterations, "gpu", [&]()
{
hipLaunchKernelGGL(( reduceMaxIdx), dim3(1), dim3(1), 0, 0, input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized gpu", [&]()
{
reduceMaxIdxOptimized << <1, 1024 >> > (input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized shared gpu", [&]()
{
reduceMaxIdxOptimizedShared << <1, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized block gpu", [&]()
{
reduceMaxIdxOptimizedBlocks << <4, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
process(iterations, "warp otimized gpu", [&]()
{
reduceMaxIdxOptimizedWarp << <1, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
process(iterations, "warp shared otimized gpu", [&]()
{
reduceMaxIdxOptimizedWarpShared << <1, 1024 >> > (input.data(), N, output.data(), outputIdx.data());
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
});
output[0] = 0;
cin.get();
return 0;
} | 87b5c6a2ec0d0443efd1288165c753b57e7ad933.cu | #include <math.h>
#include <stdio.h>
#include <vector>
#include <random>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <chrono>
#include <execution>
#include <cuda_profiler_api.h>
#include "ManagedAllocator.hpp"
#include "Operations.h"
#include "ReduceMaxIdx.h"
#include "ReduceMaxIdxOptimized.h"
#include "ReduceMaxIdxOptimizedShared.h"
#include "ReduceMaxIdxOptimizedBlocks.h"
#include "ReduceMaxIdxOptimizedWarp.h"
#include "ReduceMaxIdxOptimizedWarpShared.h"
#include "Process.h"
template<class T>
using ManagedVector = std::vector<T, ManagedAllocator<T>>;
template <typename T>
std::ostream& operator<< (std::ostream& out, const ManagedVector<T>& v) {
if ( !v.empty() ) {
out << '[';
std::copy (v.begin(), v.end(), std::ostream_iterator<T>(out, ", "));
out << "\b\b]";
}
return out;
}
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
//if (abort) exit(code);
}
}
void checkGpuMem()
{
size_t freeM, totalM, usedM;
size_t freeT, totalT;
cudaMemGetInfo(&freeT,&totalT);
freeM = freeT / 1048576; /* keep the 64-bit byte count; casting to unsigned int truncates above 4 GiB */
totalM = totalT / 1048576;
usedM = totalM - freeM;
printf ( "mem free %ld MB\nmem total %ld MB\nmem used %ld MB\n",freeM,totalM,usedM);
}
int main()
{
checkGpuMem();
const int N = 256000;
ManagedVector<float> input(N);
ManagedVector<float> output(1024 / 32);
ManagedVector<int> outputIdx(1024 / 32);
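// 1024 / 32 = 32 slots, i.e. room for one partial result per warp of a 1024-thread block.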
default_random_engine e;
uniform_real_distribution<> dis(0, 1); // range 0 - 1
generate(begin(input), end(input), [&](){ return dis(e); });
cudaMemPrefetchAsync(input.data(), input.size() * sizeof(float), 0, 0);
cudaMemPrefetchAsync(output.data(), output.size() * sizeof(float), 0, 0);
cudaMemPrefetchAsync(outputIdx.data(), outputIdx.size() * sizeof(int), 0, 0);
gpuErrchk(cudaDeviceSynchronize());
int iterations = 1000;
cout << endl;
cout << "iterations count=" << iterations << endl;
cout << "array size=" << N << endl;
cout << endl;
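// Baseline: a single thread (1 block x 1 thread) scans the entire array.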
process(iterations, "gpu", [&]()
{
reduceMaxIdx<<<1, 1>>>(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized gpu", [&]()
{
reduceMaxIdxOptimized << <1, 1024 >> > (input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized shared gpu", [&]()
{
reduceMaxIdxOptimizedShared << <1, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
process(iterations, "otimized block gpu", [&]()
{
reduceMaxIdxOptimizedBlocks << <4, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
process(iterations, "warp otimized gpu", [&]()
{
reduceMaxIdxOptimizedWarp << <1, 1024 >> >(input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
process(iterations, "warp shared otimized gpu", [&]()
{
reduceMaxIdxOptimizedWarpShared << <1, 1024 >> > (input.data(), N, output.data(), outputIdx.data());
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
});
output[0] = 0;
cin.get();
return 0;
} |
3212dbae7553971f4b640d02505d74549a233e19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::dense(const Tensor& input,
int outDim,
ActiMode activation,
bool use_bias,
const Op* shared_op,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Linear *li = new Linear(*this, input, outDim, activation, use_bias,
shared_op, kernel_initializer, bias_initializer);
layers.push_back(li);
return li->outputs[0];
}
Linear* FFModel::dense(int inDim, int outDim,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Linear *li = new Linear(*this, inDim, outDim, activation, use_bias,
kernel_initializer, bias_initializer);
layers.push_back(li);
return li;
}
Linear::Linear(FFModel& model,
const Tensor& _input,
int out_dim,
ActiMode _activation,
bool _use_bias,
const Op* shared_op,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, OP_LINEAR, shared_op, "Dense_"+std::to_string(out_dim), _input),
in_channels(_input.adim[0]), out_channels(out_dim),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
numInputs = 1;
numOutputs = 1;
outputs[0].numDim = _input.numDim;
for (int i = 1; i < outputs[0].numDim; i++)
outputs[0].adim[i] = _input.adim[i];
outputs[0].adim[0] = out_dim;
weights[0].numDim = 2;
weights[0].adim[0] = in_channels;
weights[0].adim[1] = out_channels;
numWeights = 1;
if (use_bias) {
weights[1].numDim = 1;
weights[1].adim[0] = out_channels;
numWeights = 2;
}
}
Linear::Linear(FFModel& model,
int in_dim, int out_dim,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, OP_LINEAR, "Dense_"+std::to_string(out_dim), 1),
in_channels(in_dim), out_channels(out_dim),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
}
Tensor Linear::init_inout(FFModel& model, const Tensor& _input)
{
assert(_input.adim[0] == in_channels);
inputs[0] = _input;
create_output_and_partition(model);
return outputs[0];
}
/*
void Linear::add_to_model(FFModel& model)
{
model.layers.push_back(this);
model.parameters.push_back(weights[0]);
if (numWeights > 1) { // bias is used
assert(numWeights == 2);
model.parameters.push_back(weights[1]);
}
}
*/
void Linear::create_weights(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_weights_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim
assert(false);
}
}
}
template<int NDIM>
void Linear::create_weights_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
// Create kernel tensor
{
const int dims[2] = {out_channels, in_channels};
weights[0] = model.create_linear_weight<2>(this, dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT, kernel_initializer);
}
// Create bias tensor
if (use_bias) {
const int dims[1] = {out_channels};
weights[1] = model.create_linear_weight<1>(this, dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT, bias_initializer);
assert(numWeights == 2);
} else {
assert(numWeights == 1);
}
}
void Linear::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim for the Linear operator
assert(false);
}
}
}
template<int NDIM>
void Linear::create_output_and_partition_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1;
int in_dim = inputs[0].adim[0];
assert(in_dim == in_channels);
int batch_size = inputs[0].adim[NDIM-1];
{
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = outputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
// Create replica tensor
if (num_par_c > 1) {
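// The output channels are split num_par_c ways and every channel shard reads the full input,
// so input_lps[0] aliases the same input tile across the channel dimension; the replica tensor
// gives each shard its own input-gradient buffer, which backward2_task sums back into
// inputs[0].region_grad.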
const int dims[3] = {num_par_c, batch_size, in_dim};
replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT);
{
Rect<NDIM> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0);
extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1;
}
Transform<NDIM, NDIM> transform;
for (int i = 0; i < NDIM; i++)
for (int j = 0; j < NDIM; j++)
transform[i][j] = 0;
for (int i = 1; i < NDIM; i++)
transform[i][i] = extent.hi[i] + 1;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, inputs[0].region.get_index_space(), task_is, transform, extent);
input_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region, ip);
}
// Backward use the same ip as inputs[0]
input_grad_lps[0] = inputs[0].part_grad;
{
IndexSpaceT<NDIM> input_task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect));
Rect<NDIM+1> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0);
extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1;
}
extent.lo[NDIM] = 0;
extent.hi[NDIM] = num_par_c - 1;
Transform<NDIM+1, NDIM> transform;
for (int i = 0; i < NDIM+1; i++)
for (int j = 0; j < NDIM; j++)
transform[i][j] = 0;
for (int i = 0; i < NDIM; i++)
transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1);
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, replica.region_grad.get_index_space(), input_task_is,
transform, extent);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
// Note we use replica.part to save how to partition the replica
// to compute input_grad_lps
replica.part = runtime->get_logical_partition(
ctx, replica.region_grad, ip);
}
} else {
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
Rect<NDIM> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0);
extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1;
}
Transform<NDIM, NDIM> transform;
for (int i = 0; i < NDIM; i++)
for (int j = 0; j < NDIM; j++) {
transform[i][j] = 0;
if (i==j)
transform[i][j] = extent.hi[i] + 1;
}
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, inputs[0].region.get_index_space(), task_is, transform, extent);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
input_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region, ip);
input_grad_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region_grad, ip);
}
}
}
/*
regions[0](O): output
regions[1](I): kernel
regions[2](I): bias
*/
OpMeta* Linear::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (out_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return init_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
return NULL;
}
template<int NDIM>
OpMeta* Linear::init_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Linear* linear = (Linear*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
//TensorAccessorR<float, 2> acc_input(
// regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
//int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n",
in_dim, out_dim, batch_size);
LinearMeta* m = new LinearMeta(handle, batch_size);
if (linear->activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (linear->activation) {
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
default:
// Unsupported activation mode
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batch_size, out_dim, 1, 1));
}
return m;
}
void Linear::init(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return init_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::init_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(NDIM, pcname, pc);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
//launcher.add_region_requirement(
// RegionRequirement(input_lps[0], 0/*projection id*/,
// READ_ONLY, EXCLUSIVE, inputs[0].region));
//launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(2, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
void Linear::forward_kernel(const LinearMeta* m,
const float* input_ptr,
float* output_ptr,
const float* kernel_ptr,
const float* bias_ptr,
int in_dim, int out_dim, int batch_size) const
{
float alpha = 1.0f, beta = 0.0f;
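// First GEMM: output (out_dim x batch_size) = kernel^T (out_dim x in_dim) * input (in_dim x batch_size).
// Second GEMM (k = 1, beta = 1): broadcast-add the bias, output += bias * ones(1 x batch_size).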
checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N,
out_dim, batch_size, in_dim,
&alpha, kernel_ptr, in_dim,
input_ptr, in_dim, &beta,
output_ptr, out_dim));
checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N,
out_dim, batch_size, 1,
&alpha, bias_ptr, 1,
m->one_ptr, 1, &alpha,
output_ptr, out_dim));
if (activation != AC_MODE_NONE) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr,
&beta, m->outputTensor, output_ptr));
}
}
__host__
void Linear::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I); input
regions[1](O): output
regions[2](I): kernel
regions[3](I): bias
*/
template<int NDIM>
void Linear::forward_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
Linear* linear = (Linear*) task->args;
const LinearMeta* m = *((LinearMeta**) task->local_args);
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
assert(acc_output.rect.volume() == out_dim * batch_size);
assert(acc_input.rect.volume() == in_dim * batch_size);
assert(acc_kernel.rect.volume() == in_dim * out_dim);
assert(acc_bias.rect.volume() == out_dim);
hipEvent_t t_start, t_end;
if (linear->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
linear->forward_kernel(m, acc_input.ptr, acc_output.ptr,
acc_kernel.ptr, acc_bias.ptr, in_dim, out_dim, batch_size);
if (linear->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Linear forward time = %.2lfms\n", elapsed);
//print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]");
//print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]");
//print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]");
//print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]");
}
}
void Linear::forward(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::forward_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void sigmoid_backward(float *grad_ptr, const float *output, int n)
{
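// d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); output holds the forward sigmoid values,
// so the incoming gradient is scaled elementwise.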
CUDA_KERNEL_LOOP(i, n)
{
grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]);
}
}
void Linear::backward_kernel(const LinearMeta* m,
const float* input_ptr,
float* input_grad_ptr,
const float* output_ptr,
float* output_grad_ptr,
const float* kernel_ptr,
float* kernel_grad_ptr,
float* bias_grad_ptr,
int in_dim, int out_dim, int batch_size) const
{
float alpha = 1.0f;
int output_size = out_dim * batch_size;
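// Apply the activation derivative in place on output_grad (output holds the forward activations).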
if (activation == AC_MODE_RELU) {
hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, 0,
output_grad_ptr, output_ptr, output_size);
} else if (activation == AC_MODE_SIGMOID) {
hipLaunchKernelGGL(( sigmoid_backward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, 0,
output_grad_ptr, output_ptr, output_size);
} else {
// TODO: only support relu and sigmoid for now
assert(activation == AC_MODE_NONE);
}
// Compute weight gradient
// NOTE: we use alpha=1 for kernel_grad to accumulate gradients
checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_T,
in_dim, out_dim, batch_size,
&alpha, input_ptr, in_dim,
output_grad_ptr, out_dim,
&alpha, kernel_grad_ptr, in_dim));
// Compute bias gradient
// NOTE: we use alpha=1 for bias_grad to accumulate gradients
checkCUDA(hipblasSgemv(m->handle.blas, HIPBLAS_OP_N,
out_dim, batch_size,
&alpha, output_grad_ptr, out_dim,
m->one_ptr, 1,
&alpha, bias_grad_ptr, 1));
// Compute data gradient
// NOTE: we use alpha=1 for input_grad to accumulate gradients
checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_N,
in_dim, batch_size, out_dim,
&alpha, kernel_ptr, in_dim,
output_grad_ptr, out_dim,
&alpha, input_grad_ptr, in_dim));
}
void Linear::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I): input
regions[1](I/O): replica_grad or input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](I/O): filter_grad
regions[6](I/O): bias_grad
*/
template<int NDIM>
__host__
void Linear::backward_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 7);
assert(task->regions.size() == 7);
Linear* linear = (Linear*) task->args;
const LinearMeta* m = *((LinearMeta**) task->local_args);
float* input_grad = NULL;
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, NDIM> acc_output(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
if (domain.get_dim() == 3) {
TensorAccessorW<float, 3> acc_replica_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(acc_replica_grad.rect.volume() == in_dim * batch_size);
input_grad = acc_replica_grad.ptr;
} else {
TensorAccessorW<float, 2> acc_replica_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(acc_replica_grad.rect.volume() == in_dim * batch_size);
input_grad = acc_replica_grad.ptr;
}
TensorAccessorW<float, NDIM> acc_output_grad(
regions[3], task->regions[3], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_kernel_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorW<float, 1> acc_bias_grad(
regions[6], task->regions[6], FID_DATA, ctx, runtime,
true/*readOutput*/);
// make sure the sizes match
assert(acc_output.rect.volume() == out_dim * batch_size);
assert(acc_output_grad.rect.volume() == out_dim * batch_size);
assert(acc_kernel.rect.volume() == in_dim * out_dim);
assert(acc_kernel_grad.rect.volume() == in_dim * out_dim);
assert(acc_bias_grad.rect.volume() == out_dim);
hipEvent_t t_start, t_end;
if (linear->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
linear->backward_kernel(m, acc_input.ptr, input_grad,
acc_output.ptr, acc_output_grad.ptr,
acc_kernel.ptr, acc_kernel_grad.ptr,
acc_bias_grad.ptr, in_dim, out_dim, batch_size);
if (linear->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Linear backward time = %.2lfms\n", elapsed);
//print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]");
//print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]");
//print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]");
//print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]");
}
}
void Linear::backward2_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward2_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I/O): input_grad
regions[1](I): replicas
*/
template<int NDIM>
__host__
void Linear::backward2_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
float alpha = 1.0f;
const LinearMeta* m = *((LinearMeta**) task->local_args);
TensorAccessorW<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 3> acc_replica(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
assert(acc_input.rect.hi[0] == acc_replica.rect.hi[0]);
assert(acc_input.rect.lo[0] == acc_replica.rect.lo[0]);
assert(acc_input.rect.hi[1] == acc_replica.rect.hi[1]);
assert(acc_input.rect.lo[1] == acc_replica.rect.lo[1]);
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1;
const float *replica_ptr = acc_replica.ptr;
for (int i = 1; i < num_replica; i++) {
checkCUDA(hipblasSaxpy(m->handle.blas, acc_input.rect.volume(),
&alpha, replica_ptr, 1, acc_input.ptr, 1));
replica_ptr += acc_input.rect.volume();
}
}
void Linear::backward(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::backward_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
{
IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): replica_grad
if (replica.region_grad != LogicalRegion::NO_REGION) {
launcher.add_region_requirement(
RegionRequirement(replica.part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, replica.region_grad));
launcher.add_field(1, FID_DATA);
} else {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
}
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
if (replica.region_grad != LogicalRegion::NO_REGION) {
// We aggregate gradients from the replica tensor into the input gradient tensor
// Note we use input's task_is to reduce extra data transfers
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part_grad.get_index_partition());
IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect));
IndexLauncher launcher(LINEAR_BWD2_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// Note that replica.part saves a partition of replica.region_grad
launcher.add_region_requirement(
RegionRequirement(replica.part, 0/*partition id*/,
READ_ONLY, EXCLUSIVE, replica.region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
}
/*
__host__
Parameter* Linear::get_parameter(int index)
{
if (index == 0) {
return &weights[0];
} else if (index == 1){
return &weights[1];
} else {
assert(0);
return NULL;
}
}
*/
__host__
void Linear::print_layer(const FFModel& ff)
{
printf("linear layer\n");
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region);
kernel_req.add_field(FID_DATA);
InlineLauncher kernel_launcher(kernel_req);
PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher);
kernel_region.wait_until_valid();
RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region);
bias_req.add_field(FID_DATA);
InlineLauncher bias_launcher(bias_req);
PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher);
bias_region.wait_until_valid();
TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true);
TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true);
const float *kernel_ptr = acc_kernel.ptr;
const float *bias_ptr = acc_bias.ptr;
size_t kernel_size = acc_kernel.rect.volume();
int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1;
size_t bias_size = acc_bias.rect.volume();
printf("kernel, %p, %d, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2);
printf("bias, %p, %d\n", bias_ptr, bias_size);
for (int i = 0; i < bias_size; i++) {
printf("%f ", bias_ptr[i]);
}
printf("\n");
for (int i = 0; i < kernel_size; i++) {
printf("%f ", kernel_ptr[i]);
}
printf("\n");
runtime->unmap_region(ctx, kernel_region);
runtime->unmap_region(ctx, bias_region);
}
LinearMeta::LinearMeta(FFHandler handler, int batch_size)
: OpMeta(handler)
{
// Allocate an all-one's vector
float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size);
for (int i = 0; i < batch_size; i++)
dram_one_ptr[i] = 1.0f;
float* fb_one_ptr;
checkCUDA(hipMalloc(&fb_one_ptr, sizeof(float) * batch_size));
checkCUDA(hipMemcpy(fb_one_ptr, dram_one_ptr,
sizeof(float) * batch_size, hipMemcpyHostToDevice));
free(dram_one_ptr); // host-side staging buffer is no longer needed
one_ptr = (const float*) fb_one_ptr;
// Allocate descriptors
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
}
bool Linear::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
Tensor sub_output, sub_input;
if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR))
return false;
if (!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR))
return false;
int input_c = sub_input.adim[0];
int input_n = sub_input.get_volume() / input_c;
int output_c = sub_output.adim[0];
int output_n = sub_output.get_volume() / output_c;
LinearMeta* m = sim->linear_meta;
if (activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (activation) {
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
default:
// Unsupported activation mode
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1));
}
// allocate tensors in simulator
sim->free_all();
float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_ptr != NULL);
float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert(output_ptr != NULL);
float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT);
assert(kernel_ptr != NULL);
float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT);
assert(bias_ptr != NULL);
// measure forward time
checkCUDA(hipDeviceSynchronize());
for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) {
if (i == sim->warmup_times) {
checkCUDA(hipEventRecord(sim->start_event));
}
forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr,
input_c, output_c, input_n);
}
checkCUDA(hipEventRecord(sim->end_event));
checkCUDA(hipEventSynchronize(sim->end_event));
float milliseconds;
hipEventElapsedTime(&milliseconds, sim->start_event, sim->end_event);
forward_time = milliseconds / sim->repeat_times;
// measure backward time
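// The same buffers are deliberately reused as both data and gradient
// pointers below; only kernel launch time matters here, not numerical output.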
checkCUDA(hipDeviceSynchronize());
for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) {
if (i == sim->warmup_times) {
checkCUDA(hipEventRecord(sim->start_event));
}
backward_kernel(m, input_ptr, input_ptr, output_ptr, output_ptr,
kernel_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n);
}
checkCUDA(hipEventRecord(sim->end_event));
checkCUDA(hipEventSynchronize(sim->end_event));
hipEventElapsedTime(&milliseconds, sim->start_event, sim->end_event);
backward_time = milliseconds / sim->repeat_times;
printf("[Measure Linear] in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n",
input_n, input_c, output_n, output_c, forward_time, backward_time);
return true;
}
| 3212dbae7553971f4b640d02505d74549a233e19.cu | /* Copyright 2019 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::dense(const Tensor& input,
int outDim,
ActiMode activation,
bool use_bias,
const Op* shared_op,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Linear *li = new Linear(*this, input, outDim, activation, use_bias,
shared_op, kernel_initializer, bias_initializer);
layers.push_back(li);
return li->outputs[0];
}
Linear* FFModel::dense(int inDim, int outDim,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Linear *li = new Linear(*this, inDim, outDim, activation, use_bias,
kernel_initializer, bias_initializer);
layers.push_back(li);
return li;
}
Linear::Linear(FFModel& model,
const Tensor& _input,
int out_dim,
ActiMode _activation,
bool _use_bias,
const Op* shared_op,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, OP_LINEAR, shared_op, "Dense_"+std::to_string(out_dim), _input),
in_channels(_input.adim[0]), out_channels(out_dim),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
numInputs = 1;
numOutputs = 1;
outputs[0].numDim = _input.numDim;
for (int i = 1; i < outputs[0].numDim; i++)
outputs[0].adim[i] = _input.adim[i];
outputs[0].adim[0] = out_dim;
weights[0].numDim = 2;
weights[0].adim[0] = in_channels;
weights[0].adim[1] = out_channels;
numWeights = 1;
if (use_bias) {
weights[1].numDim = 1;
weights[1].adim[0] = out_channels;
numWeights = 2;
}
}
Linear::Linear(FFModel& model,
int in_dim, int out_dim,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, OP_LINEAR, "Dense_"+std::to_string(out_dim), 1),
in_channels(in_dim), out_channels(out_dim),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
}
Tensor Linear::init_inout(FFModel& model, const Tensor& _input)
{
assert(_input.adim[0] == in_channels);
inputs[0] = _input;
create_output_and_partition(model);
return outputs[0];
}
/*
void Linear::add_to_model(FFModel& model)
{
model.layers.push_back(this);
model.parameters.push_back(weights[0]);
if (numWeights > 1) { // bias is used
assert(numWeights == 2);
model.parameters.push_back(weights[1]);
}
}
*/
void Linear::create_weights(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_weights_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim
assert(false);
}
}
}
template<int NDIM>
void Linear::create_weights_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
// Create kernel tensor
{
const int dims[2] = {out_channels, in_channels};
weights[0] = model.create_linear_weight<2>(this, dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT, kernel_initializer);
}
// Create bias tensor
if (use_bias) {
const int dims[1] = {out_channels};
weights[1] = model.create_linear_weight<1>(this, dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT, bias_initializer);
assert(numWeights == 2);
} else {
assert(numWeights == 1);
}
}
void Linear::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim for ElementWiseBinary operator
assert(false);
}
}
}
template<int NDIM>
void Linear::create_output_and_partition_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1;
int in_dim = inputs[0].adim[0];
assert(in_dim == in_channels);
int batch_size = inputs[0].adim[NDIM-1];
{
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = outputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
// Create replica tensor
if (num_par_c > 1) {
const int dims[3] = {num_par_c, batch_size, in_dim};
replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT);
{
Rect<NDIM> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0);
extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1;
}
Transform<NDIM, NDIM> transform;
for (int i = 0; i < NDIM; i++)
for (int j = 0; j < NDIM; j++)
transform[i][j] = 0;
for (int i = 1; i < NDIM; i++)
transform[i][i] = extent.hi[i] + 1;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, inputs[0].region.get_index_space(), task_is, transform, extent);
input_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region, ip);
}
// Backward use the same ip as inputs[0]
input_grad_lps[0] = inputs[0].part_grad;
{
IndexSpaceT<NDIM> input_task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect));
Rect<NDIM+1> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0);
extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1;
}
extent.lo[NDIM] = 0;
extent.hi[NDIM] = num_par_c - 1;
Transform<NDIM+1, NDIM> transform;
for (int i = 0; i < NDIM+1; i++)
for (int j = 0; j < NDIM; j++)
transform[i][j] = 0;
for (int i = 0; i < NDIM; i++)
transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1);
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, replica.region_grad.get_index_space(), input_task_is,
transform, extent);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
// Note we use replica.part to save how to partition the replica
// to compute input_grad_lps
replica.part = runtime->get_logical_partition(
ctx, replica.region_grad, ip);
}
} else {
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
Rect<NDIM> extent;
for (int i = 0; i < NDIM; i++) {
extent.lo[i] = 0;
assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0);
extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1;
}
Transform<NDIM, NDIM> transform;
for (int i = 0; i < NDIM; i++)
for (int j = 0; j < NDIM; j++) {
transform[i][j] = 0;
if (i==j)
transform[i][j] = extent.hi[i] + 1;
}
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, inputs[0].region.get_index_space(), task_is, transform, extent);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
input_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region, ip);
input_grad_lps[0] = runtime->get_logical_partition(
ctx, inputs[0].region_grad, ip);
}
}
}
/*
regions[0](O): output
regions[1](I): kernel
regions[2](I): bias
*/
OpMeta* Linear::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (out_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return init_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
return NULL;
}
template<int NDIM>
OpMeta* Linear::init_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Linear* linear = (Linear*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
//TensorAccessorR<float, 2> acc_input(
// regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
//int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n",
in_dim, out_dim, batch_size);
LinearMeta* m = new LinearMeta(handle, batch_size);
if (linear->activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (linear->activation) {
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
default:
// Unsupported activation mode
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batch_size, out_dim, 1, 1));
}
return m;
}
void Linear::init(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return init_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::init_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(NDIM, pcname, pc);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
//launcher.add_region_requirement(
// RegionRequirement(input_lps[0], 0/*projection id*/,
// READ_ONLY, EXCLUSIVE, inputs[0].region));
//launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(2, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
void Linear::forward_kernel(const LinearMeta* m,
const float* input_ptr,
float* output_ptr,
const float* kernel_ptr,
const float* bias_ptr,
int in_dim, int out_dim, int batch_size) const
{
float alpha = 1.0f, beta = 0.0f;
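// output(out_dim x batch) = kernel^T * input; the kernel is stored
// column-major as (in_dim x out_dim), hence CUBLAS_OP_T on A.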
checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N,
out_dim, batch_size, in_dim,
&alpha, kernel_ptr, in_dim,
input_ptr, in_dim, &beta,
output_ptr, out_dim));
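// Broadcast the bias across the batch as a rank-1 update:
// output += bias(out_dim x 1) * ones(1 x batch), using the all-ones vector
// kept in LinearMeta (m->one_ptr).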
checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N,
out_dim, batch_size, 1,
&alpha, bias_ptr, 1,
m->one_ptr, 1, &alpha,
output_ptr, out_dim));
if (activation != AC_MODE_NONE) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr,
&beta, m->outputTensor, output_ptr));
}
}
__host__
void Linear::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I): input
regions[1](O): output
regions[2](I): kernel
regions[3](I): bias
*/
template<int NDIM>
void Linear::forward_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
Linear* linear = (Linear*) task->args;
const LinearMeta* m = *((LinearMeta**) task->local_args);
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
assert(acc_output.rect.volume() == out_dim * batch_size);
assert(acc_input.rect.volume() == in_dim * batch_size);
assert(acc_kernel.rect.volume() == in_dim * out_dim);
assert(acc_bias.rect.volume() == out_dim);
cudaEvent_t t_start, t_end;
if (linear->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
linear->forward_kernel(m, acc_input.ptr, acc_output.ptr,
acc_kernel.ptr, acc_bias.ptr, in_dim, out_dim, batch_size);
if (linear->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Linear forward time = %.2lfms\n", elapsed);
//print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]");
//print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]");
//print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]");
//print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]");
}
}
void Linear::forward(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::forward_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void sigmoid_backward(float *grad_ptr, const float *output, int n)
{
CUDA_KERNEL_LOOP(i, n)
{
grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]);
}
}
void Linear::backward_kernel(const LinearMeta* m,
const float* input_ptr,
float* input_grad_ptr,
const float* output_ptr,
float* output_grad_ptr,
const float* kernel_ptr,
float* kernel_grad_ptr,
float* bias_grad_ptr,
int in_dim, int out_dim, int batch_size) const
{
float alpha = 1.0f;
int output_size = out_dim * batch_size;
if (activation == AC_MODE_RELU) {
reluBackward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS>>>(
output_grad_ptr, output_ptr, output_size);
} else if (activation == AC_MODE_SIGMOID) {
sigmoid_backward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS>>>(
output_grad_ptr, output_ptr, output_size);
} else {
// TODO: only support relu and sigmoid for now
assert(activation == AC_MODE_NONE);
}
// Compute weight gradient
// NOTE: we use alpha=1 for kernel_grad to accumulate gradients
checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T,
in_dim, out_dim, batch_size,
&alpha, input_ptr, in_dim,
output_grad_ptr, out_dim,
&alpha, kernel_grad_ptr, in_dim));
// Compute bias gradient
// NOTE: we use alpha=1 for bias_grad to accumulate gradients
checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N,
out_dim, batch_size,
&alpha, output_grad_ptr, out_dim,
m->one_ptr, 1,
&alpha, bias_grad_ptr, 1));
// Compute data gradient
// NOTE: we use alpha=1 for input_grad to accumulate gradients
checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N,
in_dim, batch_size, out_dim,
&alpha, kernel_ptr, in_dim,
output_grad_ptr, out_dim,
&alpha, input_grad_ptr, in_dim));
}
void Linear::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I): input
regions[1](I/O): replica_grad or input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](I/O): filter_grad
regions[6](I/O): bias_grad
*/
template<int NDIM>
__host__
void Linear::backward_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 7);
assert(task->regions.size() == 7);
Linear* linear = (Linear*) task->args;
const LinearMeta* m = *((LinearMeta**) task->local_args);
float* input_grad = NULL;
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, NDIM> acc_output(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int batch_size = acc_output.rect.volume() / out_dim;
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
if (domain.get_dim() == 3) {
TensorAccessorW<float, 3> acc_replica_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(acc_replica_grad.rect.volume() == in_dim * batch_size);
input_grad = acc_replica_grad.ptr;
} else {
TensorAccessorW<float, 2> acc_replica_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(acc_replica_grad.rect.volume() == in_dim * batch_size);
input_grad = acc_replica_grad.ptr;
}
TensorAccessorW<float, NDIM> acc_output_grad(
regions[3], task->regions[3], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_kernel(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_kernel_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorW<float, 1> acc_bias_grad(
regions[6], task->regions[6], FID_DATA, ctx, runtime,
true/*readOutput*/);
// make sure the sizes match
assert(acc_output.rect.volume() == out_dim * batch_size);
assert(acc_output_grad.rect.volume() == out_dim * batch_size);
assert(acc_kernel.rect.volume() == in_dim * out_dim);
assert(acc_kernel_grad.rect.volume() == in_dim * out_dim);
assert(acc_bias_grad.rect.volume() == out_dim);
cudaEvent_t t_start, t_end;
if (linear->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
linear->backward_kernel(m, acc_input.ptr, input_grad,
acc_output.ptr, acc_output_grad.ptr,
acc_kernel.ptr, acc_kernel_grad.ptr,
acc_bias_grad.ptr, in_dim, out_dim, batch_size);
if (linear->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Linear backward time = %.2lfms\n", elapsed);
//print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]");
//print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]");
//print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]");
//print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]");
}
}
void Linear::backward2_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward2_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I/O): input_grad
regions[1](I): replicas
*/
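// backward2 folds the replicated input gradients back into the single input
// gradient region: each replica slab is accumulated onto acc_input with a
// Saxpy call in the loop below.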
template<int NDIM>
__host__
void Linear::backward2_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
float alpha = 1.0f;
const LinearMeta* m = *((LinearMeta**) task->local_args);
TensorAccessorW<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 3> acc_replica(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
assert(acc_input.rect.hi[0] == acc_replica.rect.hi[0]);
assert(acc_input.rect.lo[0] == acc_replica.rect.lo[0]);
assert(acc_input.rect.hi[1] == acc_replica.rect.hi[1]);
assert(acc_input.rect.lo[1] == acc_replica.rect.lo[1]);
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1;
const float *replica_ptr = acc_replica.ptr;
for (int i = 1; i < num_replica; i++) {
checkCUDA(cublasSaxpy(m->handle.blas, acc_input.rect.volume(),
&alpha, replica_ptr, 1, acc_input.ptr, 1));
replica_ptr += acc_input.rect.volume();
}
}
void Linear::backward(const FFModel& ff)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_with_dim<DIM>(ff);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<int NDIM>
void Linear::backward_with_dim(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<NDIM> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
{
IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): replica_grad
if (replica.region_grad != LogicalRegion::NO_REGION) {
launcher.add_region_requirement(
RegionRequirement(replica.part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, replica.region_grad));
launcher.add_field(1, FID_DATA);
} else {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
}
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
if (replica.region_grad != LogicalRegion::NO_REGION) {
// We aggregate gradients from the replica tensor into the input gradient tensor
// Note we use input's task_is to reduce extra data transfers
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part_grad.get_index_partition());
IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect));
IndexLauncher launcher(LINEAR_BWD2_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// Note that replica.part saves a partition of replica.region_grad
launcher.add_region_requirement(
RegionRequirement(replica.part, 0/*partition id*/,
READ_ONLY, EXCLUSIVE, replica.region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
}
/*
__host__
Parameter* Linear::get_parameter(int index)
{
if (index == 0) {
return &weights[0];
} else if (index == 1){
return &weights[1];
} else {
assert(0);
return NULL;
}
}
*/
__host__
void Linear::print_layer(const FFModel& ff)
{
printf("linear layer\n");
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region);
kernel_req.add_field(FID_DATA);
InlineLauncher kernel_launcher(kernel_req);
PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher);
kernel_region.wait_until_valid();
RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region);
bias_req.add_field(FID_DATA);
InlineLauncher bias_launcher(bias_req);
PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher);
bias_region.wait_until_valid();
TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true);
TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true);
const float *kernel_ptr = acc_kernel.ptr;
const float *bias_ptr = acc_bias.ptr;
size_t kernel_size = acc_kernel.rect.volume();
int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1;
size_t bias_size = acc_bias.rect.volume();
printf("kernel, %p, %d, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2);
printf("bias, %p, %d\n", bias_ptr, bias_size);
for (int i = 0; i < bias_size; i++) {
printf("%f ", bias_ptr[i]);
}
printf("\n");
for (int i = 0; i < kernel_size; i++) {
printf("%f ", kernel_ptr[i]);
}
printf("\n");
runtime->unmap_region(ctx, kernel_region);
runtime->unmap_region(ctx, bias_region);
}
LinearMeta::LinearMeta(FFHandler handler, int batch_size)
: OpMeta(handler)
{
// Allocate an all-one's vector
float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size);
for (int i = 0; i < batch_size; i++)
dram_one_ptr[i] = 1.0f;
float* fb_one_ptr;
checkCUDA(cudaMalloc(&fb_one_ptr, sizeof(float) * batch_size));
checkCUDA(cudaMemcpy(fb_one_ptr, dram_one_ptr,
sizeof(float) * batch_size, cudaMemcpyHostToDevice));
free(dram_one_ptr); // host-side staging buffer is no longer needed
one_ptr = (const float*) fb_one_ptr;
// Allocate descriptors
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
}
bool Linear::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
Tensor sub_output, sub_input;
if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR))
return false;
if (!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR))
return false;
int input_c = sub_input.adim[0];
int input_n = sub_input.get_volume() / input_c;
int output_c = sub_output.adim[0];
int output_n = sub_output.get_volume() / output_c;
LinearMeta* m = sim->linear_meta;
if (activation != AC_MODE_NONE) {
cudnnActivationMode_t mode;
switch (activation) {
case AC_MODE_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case AC_MODE_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
default:
// Unsupported activation mode
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1));
}
// allocate tensors in simulator
sim->free_all();
float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_ptr != NULL);
float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert(output_ptr != NULL);
float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT);
assert(kernel_ptr != NULL);
float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT);
assert(bias_ptr != NULL);
// measure forward time
checkCUDA(cudaDeviceSynchronize());
for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) {
if (i == sim->warmup_times) {
checkCUDA(cudaEventRecord(sim->start_event));
}
forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr,
input_c, output_c, input_n);
}
checkCUDA(cudaEventRecord(sim->end_event));
checkCUDA(cudaEventSynchronize(sim->end_event));
float milliseconds;
cudaEventElapsedTime(&milliseconds, sim->start_event, sim->end_event);
forward_time = milliseconds / sim->repeat_times;
// measure backward time
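// The same buffers are deliberately reused as both data and gradient
// pointers below; only kernel launch time matters here, not numerical output.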
checkCUDA(cudaDeviceSynchronize());
for (int i = 0; i < sim->warmup_times + sim->repeat_times; i++) {
if (i == sim->warmup_times) {
checkCUDA(cudaEventRecord(sim->start_event));
}
backward_kernel(m, input_ptr, input_ptr, output_ptr, output_ptr,
kernel_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n);
}
checkCUDA(cudaEventRecord(sim->end_event));
checkCUDA(cudaEventSynchronize(sim->end_event));
cudaEventElapsedTime(&milliseconds, sim->start_event, sim->end_event);
backward_time = milliseconds / sim->repeat_times;
printf("[Measure Linear] in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n",
input_n, input_c, output_n, output_c, forward_time, backward_time);
return true;
}
|
34aeba4c6cbfd61eadb8c3f5d7d67b629884dd61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "c63_cuda.h"
#include "me.h"
namespace gpu = c63::gpu;
static const int Y = Y_COMPONENT;
static const int U = U_COMPONENT;
static const int V = V_COMPONENT;
__device__
static void min_warp_reduce(int i, volatile int* values)
{
values[i] = min(values[i], values[i + 32]);
values[i] = min(values[i], values[i + 16]);
values[i] = min(values[i], values[i + 8]);
values[i] = min(values[i], values[i + 4]);
values[i] = min(values[i], values[i + 2]);
values[i] = min(values[i], values[i + 1]);
}
template<int block_size>
__device__
static void min_reduce(int i, int* values)
{
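// Tree reduction of block_size values down to values[0]. Only the lower half
// of the threads do useful work; the else-branch issues a matching number of
// __syncthreads() calls so every thread in the block reaches the same
// barriers.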
if (i < block_size / 2)
{
// Intentionally no break between cases
switch (block_size)
{
case 1024:
values[i] = min(values[i], values[i + 512]);
__syncthreads();
case 512:
values[i] = min(values[i], values[i + 256]);
__syncthreads();
case 256:
values[i] = min(values[i], values[i + 128]);
__syncthreads();
case 128:
values[i] = min(values[i], values[i + 64]);
__syncthreads();
}
if (i < 32)
{
min_warp_reduce(i, values);
}
}
else
{
switch (block_size)
{
case 1024:
__syncthreads();
case 512:
__syncthreads();
case 256:
__syncthreads();
case 128:
__syncthreads();
}
}
}
template<int range>
__global__
static void me_block_8x8_gpu_Y(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref,
const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops,
const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int tid = j * blockDim.x + i;
const int ref_mb_id = j * 4 * blockDim.x + i;
const int ref_mb2_id = (j * 4 + 1) * blockDim.x + i;
const int ref_mb3_id = (j * 4 + 2) * blockDim.x + i;
const int ref_mb4_id = (j * 4 + 3) * blockDim.x + i;
const int mb_x = blockIdx.x;
const int mb_y = blockIdx.y;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int right = rights[mb_x];
const int bottom = bottoms[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
const uint8_t* orig_block = orig + my * w + mx;
const uint8_t* ref_search_range = ref + top * w + left;
__shared__ uint8_t shared_orig_block[64];
if (i < 8 && j < 8)
{
shared_orig_block[j * 8 + i] = orig_block[j * w + i];
}
__syncthreads();
int block_sad = INT_MAX;
int block2_sad = INT_MAX;
int block3_sad = INT_MAX;
int block4_sad = INT_MAX;
const int range_width = right - left;
const int range_height = (bottom - top) / 4;
const unsigned int mask = 0x3210 + 0x1111 * (i % 4);
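// __byte_perm stitches an unaligned 32-bit window out of two aligned loads.
// Example: for i % 4 == 1 the selector is 0x4321, which picks bytes 1-3 of the
// first aligned word and byte 0 of the next, i.e. the 4 bytes starting at
// column i of the search range.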
// (i/4)*4 rounds i down to the nearest integer divisible by 4
const uint8_t* ref_block_top_row_aligned = ref_search_range + (j * 4) * w + (i / 4) * 4;
if (j < range_height && i < range_width)
{
block_sad = 0;
block2_sad = 0;
block3_sad = 0;
block4_sad = 0;
#pragma unroll
for (int y = 0; y < 8; ++y)
{
uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y * w);
uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1],
mask);
uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2],
mask);
uint32_t* ref_block2_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 1) * w);
uint32_t ref_row2_left = __byte_perm(ref_block2_row_aligned[0],
ref_block2_row_aligned[1], mask);
uint32_t ref_row2_right = __byte_perm(ref_block2_row_aligned[1],
ref_block2_row_aligned[2], mask);
uint32_t* ref_block3_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 2) * w);
uint32_t ref_row3_left = __byte_perm(ref_block3_row_aligned[0],
ref_block3_row_aligned[1], mask);
uint32_t ref_row3_right = __byte_perm(ref_block3_row_aligned[1],
ref_block3_row_aligned[2], mask);
uint32_t* ref_block4_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 3) * w);
uint32_t ref_row4_left = __byte_perm(ref_block4_row_aligned[0],
ref_block4_row_aligned[1], mask);
uint32_t ref_row4_right = __byte_perm(ref_block4_row_aligned[1],
ref_block4_row_aligned[2], mask);
uint8_t* orig_block_row = shared_orig_block + y * 8;
uint32_t orig_row_left = *((uint32_t*) orig_block_row);
uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1);
block_sad += __vsadu4(ref_row_left, orig_row_left);
block_sad += __vsadu4(ref_row_right, orig_row_right);
block2_sad += __vsadu4(ref_row2_left, orig_row_left);
block2_sad += __vsadu4(ref_row2_right, orig_row_right);
block3_sad += __vsadu4(ref_row3_left, orig_row_left);
block3_sad += __vsadu4(ref_row3_right, orig_row_right);
block4_sad += __vsadu4(ref_row4_left, orig_row_left);
block4_sad += __vsadu4(ref_row4_right, orig_row_right);
}
}
__shared__ int block_sads[32 * 32];
block_sads[ref_mb_id] = block_sad;
block_sads[ref_mb2_id] = block2_sad;
block_sads[ref_mb3_id] = block3_sad;
block_sads[ref_mb4_id] = block4_sad;
__syncthreads();
block_sads[tid] = min(block_sads[tid], block_sads[tid + 512]);
block_sads[tid + 256] = min(block_sads[tid + 256], block_sads[tid + 768]);
__syncthreads();
block_sads[tid] = min(block_sads[tid], block_sads[tid + 256]);
__syncthreads();
if (tid < 128)
{
block_sads[tid] = min(block_sads[tid], block_sads[tid + 128]);
}
__syncthreads();
if (tid < 64)
{
block_sads[tid] = min(block_sads[tid], block_sads[tid + 64]);
}
__syncthreads();
if (tid < 32)
{
min_warp_reduce(tid, block_sads);
}
__syncthreads();
int min = block_sads[0];
if (block_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb_id);
}
if (block2_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb2_id);
}
if (block3_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb3_id);
}
if (block4_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb4_id);
}
}
template<int range>
__global__
static void me_block_8x8_gpu_UV(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref,
const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops,
const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int ref_mb_id = j * blockDim.x + i;
const int mb_x = blockIdx.x;
const int mb_y = blockIdx.y;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int right = rights[mb_x];
const int bottom = bottoms[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
const uint8_t* orig_block = orig + my * w + mx;
const uint8_t* ref_search_range = ref + top * w + left;
__shared__ uint8_t shared_orig_block[64];
if (i < 8 && j < 8)
{
shared_orig_block[j * 8 + i] = orig_block[j * w + i];
}
__syncthreads();
int block_sad = INT_MAX;
const int range_width = right - left;
const int range_height = bottom - top;
const unsigned int mask = 0x3210 + 0x1111 * (i % 4);
// (i/4)*4 rounds i down to the nearest integer divisible by 4
const uint8_t* ref_block_top_row_aligned = ref_search_range + j * w + (i / 4) * 4;
if (j < range_height && i < range_width)
{
block_sad = 0;
#pragma unroll
for (unsigned int y = 8; y--;)
{
uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y * w);
uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1],
mask);
uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2],
mask);
uint8_t* orig_block_row = shared_orig_block + y * 8;
uint32_t orig_row_left = *((uint32_t*) orig_block_row);
uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1);
block_sad += __vsadu4(ref_row_left, orig_row_left);
block_sad += __vsadu4(ref_row_right, orig_row_right);
}
}
const int max_range_width = range * 2;
const int max_range_height = range * 2;
const int max_mb_count = max_range_width * max_range_height;
__shared__ int block_sads[max_mb_count];
block_sads[ref_mb_id] = block_sad;
__syncthreads();
min_reduce<max_mb_count>(ref_mb_id, block_sads);
__syncthreads();
if (block_sad == block_sads[0])
{
atomicMin(index_results + orig_mb_id, ref_mb_id);
}
}
template<int range>
__global__
static void set_motion_vectors(struct macroblock* __restrict__ mbs, const int* __restrict__ lefts,
const int* __restrict__ tops, const unsigned int* __restrict__ index_results)
{
const int mb_x = blockIdx.x;
const int mb_y = threadIdx.x;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
int index_result = index_results[orig_mb_id];
/* Here, there should be a threshold on SAD that checks if the motion vector
is cheaper than intraprediction. We always assume MV to be beneficial */
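// index_result is a row-major index into the (2*range x 2*range) search
// window, so the motion vector is the window origin (left, top) plus the
// decoded offset, relative to the macroblock position (mx, my).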
struct macroblock* mb = &mbs[orig_mb_id];
mb->use_mv = 1;
mb->mv_x = left + (index_result % (range * 2)) - mx;
mb->mv_y = top + (index_result / (range * 2)) - my;
}
template<int component>
void gpu::c63_motion_estimate(struct c63_common *cm, const struct c63_common_gpu& cm_gpu,
const struct c63_cuda& c63_cuda)
{
const int w = cm->padw[component];
const int cols = cm->mb_cols[component];
const int rows = cm->mb_rows[component];
const int range = ME_RANGE(component);
const struct boundaries& bound = cm_gpu.me_boundaries[component];
const hipStream_t stream = c63_cuda.stream[component];
unsigned int* sad_indexes = cm_gpu.sad_index_results[component];
struct macroblock* mb = cm->curframe->mbs[component];
struct macroblock* mb_gpu = cm->curframe->mbs_gpu[component];
uint8_t* orig;
uint8_t* ref;
switch (component)
{
case Y_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->Y;
ref = cm->refframe->recons_gpu->Y;
break;
case U_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->U;
ref = cm->refframe->recons_gpu->U;
break;
case V_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->V;
ref = cm->refframe->recons_gpu->V;
break;
}
hipMemsetAsync(sad_indexes, 255, cols * rows * sizeof(unsigned int), stream);
dim3 numBlocks(cols, rows);
if (component == Y_COMPONENT)
{
// Luma
dim3 threadsPerBlock(range * 2, range / 2);
hipLaunchKernelGGL(( me_block_8x8_gpu_Y<range>) , dim3(numBlocks), dim3(threadsPerBlock), 0, stream, orig, ref, bound.left,
bound.right, bound.top, bound.bottom, w, sad_indexes);
}
else
{
// Chroma
dim3 threadsPerBlock(range * 2, range * 2);
hipLaunchKernelGGL(( me_block_8x8_gpu_UV<range>) , dim3(numBlocks), dim3(threadsPerBlock), 0, stream, orig, ref,
bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes);
}
hipLaunchKernelGGL(( set_motion_vectors<range>) , dim3(cols), dim3(rows), 0, stream, mb_gpu, bound.left, bound.top,
sad_indexes);
hipEvent_t me_done = c63_cuda.me_done[component];
hipEventRecord(me_done, stream);
hipStream_t memcpy_stream = c63_cuda.memcpy_stream[component];
hipStreamWaitEvent(memcpy_stream, me_done, 0);
hipMemcpyAsync(mb, mb_gpu, cols * rows * sizeof(struct macroblock), hipMemcpyDeviceToHost,
memcpy_stream);
}
/* Motion compensation for 8x8 block */
__global__
static void mc_block_8x8_gpu(const struct macroblock* __restrict__ mbs, int w,
uint8_t __restrict__ *predicted, const uint8_t __restrict__ *ref)
{
const int mb_index = (blockIdx.x + blockIdx.y * gridDim.x);
const int block_offset = mb_index * blockDim.x * blockDim.y;
const int i = threadIdx.y;
const int j = threadIdx.x;
const struct macroblock* mb = &mbs[mb_index];
// We always assume MV to be beneficial
//if (!mb->use_mv) {
// return;
//}
const int mv_x = mb->mv_x;
const int mv_y = mb->mv_y;
/* Copy pixel from ref mandated by MV */
predicted[block_offset + i * 8 + j] = ref[(i + blockIdx.y * 8 + mv_y) * w
+ (j + blockIdx.x * 8 + mv_x)];
}
template<int component>
void gpu::c63_motion_compensate(struct c63_common *cm, const struct c63_cuda& c63_cuda)
{
const int w = cm->padw[component];
const int h = cm->padh[component];
const struct macroblock* mb = cm->curframe->mbs_gpu[component];
const hipStream_t stream = c63_cuda.stream[component];
uint8_t* pred;
uint8_t* ref;
switch (component)
{
case Y_COMPONENT:
pred = cm->curframe->predicted_gpu->Y;
ref = cm->refframe->recons_gpu->Y;
break;
case U_COMPONENT:
pred = cm->curframe->predicted_gpu->U;
ref = cm->refframe->recons_gpu->U;
break;
case V_COMPONENT:
pred = cm->curframe->predicted_gpu->V;
ref = cm->refframe->recons_gpu->V;
break;
}
const dim3 threadsPerBlock(8, 8);
const dim3 numBlocks(w / threadsPerBlock.x, h / threadsPerBlock.y);
hipLaunchKernelGGL(( mc_block_8x8_gpu), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, mb, w, pred, ref);
}
template void gpu::c63_motion_estimate<Y>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_estimate<U>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_estimate<V>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<Y>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<U>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<V>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
| 34aeba4c6cbfd61eadb8c3f5d7d67b629884dd61.cu | #include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "c63_cuda.h"
#include "me.h"
namespace gpu = c63::gpu;
static const int Y = Y_COMPONENT;
static const int U = U_COMPONENT;
static const int V = V_COMPONENT;
__device__
static void min_warp_reduce(int i, volatile int* values)
{
values[i] = min(values[i], values[i + 32]);
values[i] = min(values[i], values[i + 16]);
values[i] = min(values[i], values[i + 8]);
values[i] = min(values[i], values[i + 4]);
values[i] = min(values[i], values[i + 2]);
values[i] = min(values[i], values[i + 1]);
}
template<int block_size>
__device__
static void min_reduce(int i, int* values)
{
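// Tree reduction of block_size values down to values[0]. Only the lower half
// of the threads do useful work; the else-branch issues a matching number of
// __syncthreads() calls so every thread in the block reaches the same
// barriers.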
if (i < block_size / 2)
{
// Intentionally no break between cases
switch (block_size)
{
case 1024:
values[i] = min(values[i], values[i + 512]);
__syncthreads();
case 512:
values[i] = min(values[i], values[i + 256]);
__syncthreads();
case 256:
values[i] = min(values[i], values[i + 128]);
__syncthreads();
case 128:
values[i] = min(values[i], values[i + 64]);
__syncthreads();
}
if (i < 32)
{
min_warp_reduce(i, values);
}
}
else
{
switch (block_size)
{
case 1024:
__syncthreads();
case 512:
__syncthreads();
case 256:
__syncthreads();
case 128:
__syncthreads();
}
}
}
template<int range>
__global__
static void me_block_8x8_gpu_Y(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref,
const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops,
const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int tid = j * blockDim.x + i;
const int ref_mb_id = j * 4 * blockDim.x + i;
const int ref_mb2_id = (j * 4 + 1) * blockDim.x + i;
const int ref_mb3_id = (j * 4 + 2) * blockDim.x + i;
const int ref_mb4_id = (j * 4 + 3) * blockDim.x + i;
const int mb_x = blockIdx.x;
const int mb_y = blockIdx.y;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int right = rights[mb_x];
const int bottom = bottoms[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
const uint8_t* orig_block = orig + my * w + mx;
const uint8_t* ref_search_range = ref + top * w + left;
__shared__ uint8_t shared_orig_block[64];
if (i < 8 && j < 8)
{
shared_orig_block[j * 8 + i] = orig_block[j * w + i];
}
__syncthreads();
int block_sad = INT_MAX;
int block2_sad = INT_MAX;
int block3_sad = INT_MAX;
int block4_sad = INT_MAX;
const int range_width = right - left;
const int range_height = (bottom - top) / 4;
const unsigned int mask = 0x3210 + 0x1111 * (i % 4);
// (i/4)*4 rounds i down to the nearest integer divisible by 4
const uint8_t* ref_block_top_row_aligned = ref_search_range + (j * 4) * w + (i / 4) * 4;
if (j < range_height && i < range_width)
{
block_sad = 0;
block2_sad = 0;
block3_sad = 0;
block4_sad = 0;
#pragma unroll
for (int y = 0; y < 8; ++y)
{
uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y * w);
uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1],
mask);
uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2],
mask);
uint32_t* ref_block2_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 1) * w);
uint32_t ref_row2_left = __byte_perm(ref_block2_row_aligned[0],
ref_block2_row_aligned[1], mask);
uint32_t ref_row2_right = __byte_perm(ref_block2_row_aligned[1],
ref_block2_row_aligned[2], mask);
uint32_t* ref_block3_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 2) * w);
uint32_t ref_row3_left = __byte_perm(ref_block3_row_aligned[0],
ref_block3_row_aligned[1], mask);
uint32_t ref_row3_right = __byte_perm(ref_block3_row_aligned[1],
ref_block3_row_aligned[2], mask);
uint32_t* ref_block4_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y + 3) * w);
uint32_t ref_row4_left = __byte_perm(ref_block4_row_aligned[0],
ref_block4_row_aligned[1], mask);
uint32_t ref_row4_right = __byte_perm(ref_block4_row_aligned[1],
ref_block4_row_aligned[2], mask);
uint8_t* orig_block_row = shared_orig_block + y * 8;
uint32_t orig_row_left = *((uint32_t*) orig_block_row);
uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1);
block_sad += __vsadu4(ref_row_left, orig_row_left);
block_sad += __vsadu4(ref_row_right, orig_row_right);
block2_sad += __vsadu4(ref_row2_left, orig_row_left);
block2_sad += __vsadu4(ref_row2_right, orig_row_right);
block3_sad += __vsadu4(ref_row3_left, orig_row_left);
block3_sad += __vsadu4(ref_row3_right, orig_row_right);
block4_sad += __vsadu4(ref_row4_left, orig_row_left);
block4_sad += __vsadu4(ref_row4_right, orig_row_right);
}
}
__shared__ int block_sads[32 * 32];
block_sads[ref_mb_id] = block_sad;
block_sads[ref_mb2_id] = block2_sad;
block_sads[ref_mb3_id] = block3_sad;
block_sads[ref_mb4_id] = block4_sad;
__syncthreads();
block_sads[tid] = min(block_sads[tid], block_sads[tid + 512]);
block_sads[tid + 256] = min(block_sads[tid + 256], block_sads[tid + 768]);
__syncthreads();
block_sads[tid] = min(block_sads[tid], block_sads[tid + 256]);
__syncthreads();
if (tid < 128)
{
block_sads[tid] = min(block_sads[tid], block_sads[tid + 128]);
}
__syncthreads();
if (tid < 64)
{
block_sads[tid] = min(block_sads[tid], block_sads[tid + 64]);
}
__syncthreads();
if (tid < 32)
{
min_warp_reduce(tid, block_sads);
}
__syncthreads();
int min = block_sads[0];
if (block_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb_id);
}
if (block2_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb2_id);
}
if (block3_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb3_id);
}
if (block4_sad == min)
{
atomicMin(index_results + orig_mb_id, ref_mb4_id);
}
}
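/*
 * Chroma (U/V) variant: the search window is at most (2 * range) x (2 * range)
 * candidates, so each thread evaluates exactly one candidate position and the generic
 * min_reduce<> template performs the block-wide reduction. The winner is again
 * published with atomicMin for deterministic tie-breaking.
 */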
template<int range>
__global__
static void me_block_8x8_gpu_UV(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref,
const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops,
const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int ref_mb_id = j * blockDim.x + i;
const int mb_x = blockIdx.x;
const int mb_y = blockIdx.y;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int right = rights[mb_x];
const int bottom = bottoms[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
const uint8_t* orig_block = orig + my * w + mx;
const uint8_t* ref_search_range = ref + top * w + left;
__shared__ uint8_t shared_orig_block[64];
if (i < 8 && j < 8)
{
shared_orig_block[j * 8 + i] = orig_block[j * w + i];
}
__syncthreads();
int block_sad = INT_MAX;
const int range_width = right - left;
const int range_height = bottom - top;
const unsigned int mask = 0x3210 + 0x1111 * (i % 4);
// (i/4)*4 rounds i down to the nearest integer divisible by 4
const uint8_t* ref_block_top_row_aligned = ref_search_range + j * w + (i / 4) * 4;
if (j < range_height && i < range_width)
{
block_sad = 0;
#pragma unroll
for (unsigned int y = 8; y--;)
{
uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y * w);
uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1],
mask);
uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2],
mask);
uint8_t* orig_block_row = shared_orig_block + y * 8;
uint32_t orig_row_left = *((uint32_t*) orig_block_row);
uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1);
block_sad += __vsadu4(ref_row_left, orig_row_left);
block_sad += __vsadu4(ref_row_right, orig_row_right);
}
}
const int max_range_width = range * 2;
const int max_range_height = range * 2;
const int max_mb_count = max_range_width * max_range_height;
__shared__ int block_sads[max_mb_count];
block_sads[ref_mb_id] = block_sad;
__syncthreads();
min_reduce<max_mb_count>(ref_mb_id, block_sads);
__syncthreads();
if (block_sad == block_sads[0])
{
atomicMin(index_results + orig_mb_id, ref_mb_id);
}
}
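/*
 * Converts each macroblock's winning candidate index (an offset into its
 * (2 * range)-wide search window) back into a motion vector relative to the macroblock
 * position: x = index % (2 * range), y = index / (2 * range), mv = (left + x - mx,
 * top + y - my). Launched with one block per macroblock column and one thread per
 * macroblock row.
 */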
template<int range>
__global__
static void set_motion_vectors(struct macroblock* __restrict__ mbs, const int* __restrict__ lefts,
const int* __restrict__ tops, const unsigned int* __restrict__ index_results)
{
const int mb_x = blockIdx.x;
const int mb_y = threadIdx.x;
const int orig_mb_id = mb_y * gridDim.x + mb_x;
const int left = lefts[mb_x];
const int top = tops[mb_y];
const int mx = mb_x * 8;
const int my = mb_y * 8;
int index_result = index_results[orig_mb_id];
/* Here, there should be a threshold on SAD that checks if the motion vector
is cheaper than intraprediction. We always assume MV to be beneficial */
struct macroblock* mb = &mbs[orig_mb_id];
mb->use_mv = 1;
mb->mv_x = left + (index_result % (range * 2)) - mx;
mb->mv_y = top + (index_result / (range * 2)) - my;
}
template<int component>
void gpu::c63_motion_estimate(struct c63_common *cm, const struct c63_common_gpu& cm_gpu,
const struct c63_cuda& c63_cuda)
{
const int w = cm->padw[component];
const int cols = cm->mb_cols[component];
const int rows = cm->mb_rows[component];
const int range = ME_RANGE(component);
const struct boundaries& bound = cm_gpu.me_boundaries[component];
const cudaStream_t stream = c63_cuda.stream[component];
unsigned int* sad_indexes = cm_gpu.sad_index_results[component];
struct macroblock* mb = cm->curframe->mbs[component];
struct macroblock* mb_gpu = cm->curframe->mbs_gpu[component];
uint8_t* orig;
uint8_t* ref;
switch (component)
{
case Y_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->Y;
ref = cm->refframe->recons_gpu->Y;
break;
case U_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->U;
ref = cm->refframe->recons_gpu->U;
break;
case V_COMPONENT:
orig = (uint8_t*) cm->curframe->orig_gpu->V;
ref = cm->refframe->recons_gpu->V;
break;
}
cudaMemsetAsync(sad_indexes, 255, cols * rows * sizeof(unsigned int), stream);
dim3 numBlocks(cols, rows);
if (component == Y_COMPONENT)
{
// Luma
dim3 threadsPerBlock(range * 2, range / 2);
me_block_8x8_gpu_Y<range> <<<numBlocks, threadsPerBlock, 0, stream>>>(orig, ref, bound.left,
bound.right, bound.top, bound.bottom, w, sad_indexes);
}
else
{
// Chroma
dim3 threadsPerBlock(range * 2, range * 2);
me_block_8x8_gpu_UV<range> <<<numBlocks, threadsPerBlock, 0, stream>>>(orig, ref,
bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes);
}
set_motion_vectors<range> <<<cols, rows, 0, stream>>>(mb_gpu, bound.left, bound.top,
sad_indexes);
cudaEvent_t me_done = c63_cuda.me_done[component];
cudaEventRecord(me_done, stream);
cudaStream_t memcpy_stream = c63_cuda.memcpy_stream[component];
cudaStreamWaitEvent(memcpy_stream, me_done, 0);
cudaMemcpyAsync(mb, mb_gpu, cols * rows * sizeof(struct macroblock), cudaMemcpyDeviceToHost,
memcpy_stream);
}
/* Motion compensation for 8x8 block */
__global__
static void mc_block_8x8_gpu(const struct macroblock* __restrict__ mbs, int w,
uint8_t __restrict__ *predicted, const uint8_t __restrict__ *ref)
{
const int mb_index = (blockIdx.x + blockIdx.y * gridDim.x);
const int block_offset = mb_index * blockDim.x * blockDim.y;
const int i = threadIdx.y;
const int j = threadIdx.x;
const struct macroblock* mb = &mbs[mb_index];
// We always assume MV to be beneficial
//if (!mb->use_mv) {
// return;
//}
const int mv_x = mb->mv_x;
const int mv_y = mb->mv_y;
/* Copy pixel from ref mandated by MV */
predicted[block_offset + i * 8 + j] = ref[(i + blockIdx.y * 8 + mv_y) * w
+ (j + blockIdx.x * 8 + mv_x)];
}
template<int component>
void gpu::c63_motion_compensate(struct c63_common *cm, const struct c63_cuda& c63_cuda)
{
const int w = cm->padw[component];
const int h = cm->padh[component];
const struct macroblock* mb = cm->curframe->mbs_gpu[component];
const cudaStream_t stream = c63_cuda.stream[component];
uint8_t* pred;
uint8_t* ref;
switch (component)
{
case Y_COMPONENT:
pred = cm->curframe->predicted_gpu->Y;
ref = cm->refframe->recons_gpu->Y;
break;
case U_COMPONENT:
pred = cm->curframe->predicted_gpu->U;
ref = cm->refframe->recons_gpu->U;
break;
case V_COMPONENT:
pred = cm->curframe->predicted_gpu->V;
ref = cm->refframe->recons_gpu->V;
break;
}
const dim3 threadsPerBlock(8, 8);
const dim3 numBlocks(w / threadsPerBlock.x, h / threadsPerBlock.y);
mc_block_8x8_gpu<<<numBlocks, threadsPerBlock, 0, stream>>>(mb, w, pred, ref);
}
template void gpu::c63_motion_estimate<Y>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_estimate<U>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_estimate<V>(struct c63_common *cm,
const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<Y>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<U>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
template void gpu::c63_motion_compensate<V>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
|
f5e5a8ef201a952ab367faade1e343e2546bc5d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello_from_gpu()
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
printf("Hello World from the GPU in block %d thread %d!\n", bid, tid);
}
int main(void)
{
hipLaunchKernelGGL(( hello_from_gpu), dim3(2), dim3(4), 0, 0, );
hipDeviceSynchronize();
return 0;
} | f5e5a8ef201a952ab367faade1e343e2546bc5d3.cu | #include <stdio.h>
__global__ void hello_from_gpu()
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
printf("Hello World from the GPU in block %d thread %d!\n", bid, tid);
}
int main(void)
{
hello_from_gpu<<<2, 4>>>();
cudaDeviceSynchronize();
return 0;
} |
51e5a5a18036f08db12ebc7d52c232b8ece795f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace {
using namespace at;
template<typename scalar_t>
void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) {
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
grad_input,
target,
grad,
[] __device__(
scalar_t& grad_input_val, const scalar_t& target_val, const scalar_t& grad_val) {
if (target_val > 0) {
grad_input_val = -target_val * grad_val;
}
});
}
} // namespace
namespace at { namespace native {
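// Backward of kl_div(input, target) where `input` holds log-probabilities: the
// pointwise loss is target * (log(target) - input), so d(loss)/d(input) = -target.
// The kernel above writes grad_input = -target * grad wherever target > 0, and the
// Mean reduction case divides by input.numel() afterwards.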
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
auto grad_input = at::zeros_like(input);
Tensor grad_expand = grad.expand_as(input);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "kl_div_backward", [&]() {
kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand);
});
if (reduction == Reduction::Mean) {
return grad_input / input.numel();
}
return grad_input;
}
}} // namespace at::native
| 51e5a5a18036f08db12ebc7d52c232b8ece795f2.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace {
using namespace at;
template<typename scalar_t>
void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) {
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
grad_input,
target,
grad,
[] __device__(
scalar_t& grad_input_val, const scalar_t& target_val, const scalar_t& grad_val) {
if (target_val > 0) {
grad_input_val = -target_val * grad_val;
}
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
auto grad_input = at::zeros_like(input);
Tensor grad_expand = grad.expand_as(input);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "kl_div_backward", [&]() {
kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand);
});
if (reduction == Reduction::Mean) {
return grad_input / input.numel();
}
return grad_input;
}
}} // namespace at::native
|
fd4220424417b55f6615b812ae3def6a883cc5dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdexcept>
#include <string>
#include <random>
#include <vector>
#include <utility>
#include <cstddef>
#include <cfloat>
#include <chrono>
namespace {
struct Metadata {
int desiredBlockCount; // minimum number of blocks to create
int threadCountPerBlock; // threads per block
int pointCount; // number of points for the traveling salesman to visit
int randomSeed; // random seed used to generate the distance matrix
int threadCount; // actual number of generated threads
int prefixSize; // number of leading points held fixed per thread (not permuted)
};
struct DevicePointers {
float* distanceMatrix = 0;
// memory block containing the best result permutations found by each thread (device)
int* allThreadsBestResultPermutations = 0;
// array containing the best distance found by each thread (device)
float* allThreadsBestResults = 0;
// memory block containing the current permutation state threads are operating on
int* allThreadsPermutations = 0;
// array containing the temp state while executing Heap's algorithm
int* allThreadsC = 0;
};
Metadata parseArgs(int argc, char** argv) {
int desiredBlockCount;
int threadCountPerBlock;
int pointCount;
int randomSeed;
int threadCount;
int prefixSize;
// validate user input
if (argc != 5) {
throw std::invalid_argument("Error: Incorrect number of arguments");
}
try {
desiredBlockCount = std::stoi(argv[1]);
if (desiredBlockCount < 1) {
throw std::invalid_argument("Error: Please set block count to a value greater than 0");
}
threadCountPerBlock = std::stoi(argv[2]);
if (threadCountPerBlock < 1 || threadCountPerBlock > 1024) {
throw std::invalid_argument("Error: Please set thread count per block to a value between 1 and 1024");
}
pointCount = std::stoi(argv[3]);
if (pointCount < 2) {
throw std::invalid_argument("Error: Please set point count to a value greater than 1");
}
randomSeed = std::stoi(argv[4]);
}
catch (const std::out_of_range& e) {
std::cout << e.what() << std::endl;
throw std::invalid_argument("Error: A parameter value was out of range");
}
// calculate thread count and prefix size
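// threadCount becomes pointCount * (pointCount - 1) * ... (prefixSize factors),
// growing until the product reaches desiredBlockCount * threadCountPerBlock or all
// points are used. Each thread later fixes a unique prefix of prefixSize points and
// exhaustively permutes the remaining pointCount - prefixSize points.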
([&] {
int desiredThreadCount = desiredBlockCount * threadCountPerBlock;
threadCount = 1;
prefixSize = 0;
for (int i = pointCount; (i > 0 && threadCount < desiredThreadCount); i--) {
threadCount *= i;
prefixSize++;
}
})();
Metadata m{
desiredBlockCount,
threadCountPerBlock,
pointCount,
randomSeed,
threadCount,
prefixSize
};
return m;
}
std::vector<std::vector<float>> createDistanceMatrix(const Metadata& metadata) {
std::default_random_engine generator(metadata.randomSeed);
std::uniform_real_distribution<float> distribution(0.0f, 1000.0f);
std::vector<std::vector<float>> matrix;
matrix.reserve(metadata.pointCount);
for (int i = 0; i < metadata.pointCount; i++) {
std::vector<float> row;
row.reserve(metadata.pointCount);
for (int i = 0; i < metadata.pointCount; i++) {
row.push_back(distribution(generator));
}
matrix.push_back(std::move(row));
}
return matrix;
}
void printDistanceMatrix(const std::vector<std::vector<float>>& matrix) {
std::cout << "Distance Matrix:" << std::endl;
std::cout << "------" << "\t";
for (int i = 0; i < matrix.size(); i++) {
std::cout << i << ":\t";
}
std::cout << std::endl;
int counter = 0;
for (auto& row : matrix) {
std::cout << counter++ << ":\t";
for (auto& val : row) {
std::cout << val << "\t";
}
std::cout << std::endl;
}
}
DevicePointers allocateDeviceMemory(const Metadata& metadata) {
hipError_t cudaStatus;
DevicePointers d;
// since metadata.threadCount is probably not always divisible by metadata.threadCountPerBlock,
// some "overhead" memory needs to be allocated to not cause segfaults when coalescing memory
int actualThreadCount = ([&] {
int result = metadata.threadCount / metadata.threadCountPerBlock;
result += metadata.threadCount % metadata.threadCountPerBlock == 0 ? 0 : 1;
result *= metadata.threadCountPerBlock;
return result;
})();
std::size_t size = metadata.pointCount * metadata.pointCount * sizeof(float);
cudaStatus = hipMalloc(&d.distanceMatrix, size);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * metadata.pointCount * sizeof(int);
cudaStatus = hipMalloc(&d.allThreadsBestResultPermutations, size);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * sizeof(float);
cudaStatus = hipMalloc(&d.allThreadsBestResults, size);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * metadata.pointCount * sizeof(int);
cudaStatus = hipMalloc(&d.allThreadsPermutations, size);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * (metadata.pointCount - metadata.prefixSize) * sizeof(int);
cudaStatus = hipMalloc(&d.allThreadsC, size);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error allocating memory!");
}
return d;
}
void freeDeviceMemory(DevicePointers& d) {
hipFree(d.distanceMatrix);
hipFree(d.allThreadsBestResultPermutations);
hipFree(d.allThreadsBestResults);
hipFree(d.allThreadsPermutations);
hipFree(d.allThreadsC);
d.distanceMatrix = 0;
d.allThreadsBestResultPermutations = 0;
d.allThreadsBestResults = 0;
d.allThreadsPermutations = 0;
d.allThreadsC = 0;
}
void copyDataHostToDevice(const DevicePointers& d, const std::vector<std::vector<float>>& distanceMatrix) {
float* currentPointer = d.distanceMatrix;
hipError_t cudaStatus;
for (auto& row : distanceMatrix) {
std::size_t count = row.size() * sizeof(float);
cudaStatus = hipMemcpy(currentPointer, row.data(), count, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error copying distance matrix onto device!");
}
currentPointer += row.size();
}
}
__device__ float calculatePermutationTotalDistance(float* distanceMatrix, int* permutation, int pointCount) {
float distance = distanceMatrix[permutation[blockDim.x * (pointCount - 1)] * pointCount + permutation[0]];
for (int i = 0; i < pointCount - 1; i++) {
distance += distanceMatrix[permutation[blockDim.x * i] * pointCount + permutation[blockDim.x * (i + 1)]];
}
return distance;
}
__device__ void swap(int& a, int& b) {
int tmp = a;
a = b;
b = tmp;
}
__device__ void copyPermutationToBestArray(int* dest, int* src, int size) {
for (int i = 0; i < size; i++) {
dest[i] = src[blockDim.x * i];
}
}
__device__ void prepareSharedDistanceMatrix(const int id, const int pointCount, const float* global, float* shared) {
int size = pointCount * pointCount;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
shared[i] = global[i];
}
}
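/*
 * Exhaustive TSP search kernel. Each thread
 *  1. decodes its global id as a factoradic number to fix a unique prefix of
 *     prefixSize points (e.g. with pointCount = 4 and prefixSize = 2, id = 5 yields
 *     the prefix [3, 0], leaving [2, 1] to be permuted),
 *  2. runs the iterative form of Heap's algorithm over the remaining
 *     pointCount - prefixSize points, scoring every tour, and
 *  3. records its best tour and its length in allThreadsBestResultPermutations /
 *     allThreadsBestResults.
 * Per-thread arrays are stored interleaved with stride blockDim.x so that neighbouring
 * threads access neighbouring words (coalesced global memory accesses).
 */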
__global__ void calculateBestRoute(const int threadCount, const int prefixSize, const int pointCount, const DevicePointers d) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (threadCount <= id) {
return;
}
// initialize permutation
int* permutation = d.allThreadsPermutations + blockIdx.x * blockDim.x * pointCount + threadIdx.x;
for (int i = 0; i < pointCount; i++) {
permutation[blockDim.x * i] = i;
}
int quotient = id;
for (int i = prefixSize - 1; i >= 0; i--) {
int factoradic = quotient % (pointCount - i);
quotient /= pointCount - i;
swap(permutation[blockDim.x * i], permutation[blockDim.x * (factoradic + i)]);
}
// prepare pointers
int* permutable = permutation + blockDim.x * prefixSize;
int permutableSize = pointCount - prefixSize;
int* bestResultPermutations = d.allThreadsBestResultPermutations + id * pointCount;
extern __shared__ float sharedDistanceMatrix[];
prepareSharedDistanceMatrix(id, pointCount, d.distanceMatrix, sharedDistanceMatrix);
__syncthreads();
// initial solution is initial best
copyPermutationToBestArray(bestResultPermutations, permutation, pointCount);
float bestResult = calculatePermutationTotalDistance(sharedDistanceMatrix, permutation, pointCount);
if (permutableSize != 0) {
int* c = d.allThreadsC + blockIdx.x * blockDim.x * permutableSize + threadIdx.x;
for (int i = 0; i < permutableSize; i++) {
c[blockDim.x * i] = 0;
}
int i = 0;
while (i < permutableSize) {
if (c[blockDim.x * i] < i) {
if (i % 2 == 0) {
swap(permutable[0], permutable[blockDim.x * i]);
}
else {
swap(permutable[blockDim.x * c[blockDim.x * i]], permutable[blockDim.x * i]);
}
float tempResult = calculatePermutationTotalDistance(sharedDistanceMatrix, permutation, pointCount);
if (tempResult < bestResult) {
copyPermutationToBestArray(bestResultPermutations, permutation, pointCount);
bestResult = tempResult;
}
c[blockDim.x * i]++;
i = 0;
}
else {
c[blockDim.x * i] = 0;
i++;
}
}
}
d.allThreadsBestResults[id] = bestResult;
}
void executeThreads(const Metadata& metadata, const DevicePointers& d) {
int blockCount = ([&] {
int result = metadata.threadCount / metadata.threadCountPerBlock;
result += metadata.threadCount % metadata.threadCountPerBlock == 0 ? 0 : 1;
return result;
})();
std::size_t sharedMemorySize = metadata.pointCount * metadata.pointCount * sizeof(float);
hipLaunchKernelGGL(( calculateBestRoute), dim3(blockCount), dim3(metadata.threadCountPerBlock), sharedMemorySize, 0, metadata.threadCount, metadata.prefixSize, metadata.pointCount, d);
}
void copyDataDeviceToHostAndEvaluate(const Metadata& metadata, const DevicePointers& d) {
// copy data
hipError_t cudaStatus;
std::vector<std::vector<int>> allThreadsBestResultPermutations = ([&] {
std::vector<std::vector<int>> result;
result.reserve(metadata.threadCount);
int* tmp = new int[metadata.threadCount * metadata.pointCount];
std::size_t size = metadata.threadCount * metadata.pointCount * sizeof(int);
cudaStatus = hipMemcpy(tmp, d.allThreadsBestResultPermutations, size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error copying data from device to host");
}
int* intervalStart = tmp;
int* intervalEnd = tmp + metadata.pointCount;
for (int i = 0; i < metadata.threadCount; i++) {
result.push_back(std::vector<int>(intervalStart, intervalEnd));
intervalStart += metadata.pointCount;
intervalEnd += metadata.pointCount;
}
delete[] tmp;
return result;
})();
std::vector<float> allThreadsBestResults = ([&] {
float* tmp = new float[metadata.threadCount];
std::size_t size = metadata.threadCount * sizeof(float);
cudaStatus = hipMemcpy(tmp, d.allThreadsBestResults, size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
throw std::runtime_error("Error copying data from device to host");
}
std::vector<float> result(tmp, tmp + metadata.threadCount);
delete[] tmp;
return result;
})();
// evaluate
float bestResult = FLT_MAX;
std::vector<int>* bestResultPermutation = 0;
for (int i = 0; i < allThreadsBestResults.size(); i++) {
if (allThreadsBestResults[i] < bestResult) {
bestResult = allThreadsBestResults[i];
bestResultPermutation = &allThreadsBestResultPermutations[i];
}
}
std::cout << "Best Route:" << std::endl;
for (int i = 0; i < bestResultPermutation->size(); i++) {
std::cout << bestResultPermutation->at(i) << " -> ";
}
std::cout << bestResultPermutation->at(0) << std::endl;
std::cout << "Route Length: " << bestResult << std::endl;
}
}
int main(int argc, char** argv) {
DevicePointers devicePointers;
try {
std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now();
const Metadata METADATA = parseArgs(argc, argv);
std::vector<std::vector<float>> distanceMatrix = createDistanceMatrix(METADATA);
printDistanceMatrix(distanceMatrix);
devicePointers = allocateDeviceMemory(METADATA);
copyDataHostToDevice(devicePointers, distanceMatrix);
std::chrono::steady_clock::time_point t_threadStart = std::chrono::steady_clock::now();
executeThreads(METADATA, devicePointers);
hipDeviceSynchronize();
std::chrono::steady_clock::time_point t_threadEnd = std::chrono::steady_clock::now();
copyDataDeviceToHostAndEvaluate(METADATA, devicePointers);
freeDeviceMemory(devicePointers);
hipDeviceReset();
std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now();
std::chrono::duration<double> totalExecutionTime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end - t_start);
std::chrono::duration<double> threadExecutionTime = std::chrono::duration_cast<std::chrono::duration<double>>(t_threadEnd - t_threadStart);
std::cout << "Total execution time (s): " << totalExecutionTime.count() << std::endl;
std::cout << "Thread execution time (s): " << threadExecutionTime.count() << std::endl;
}
catch (const std::invalid_argument& e) {
std::cerr << e.what() << std::endl;
std::cerr << "Usage: " << argv[0] << "<desired block count (int)> <thread count per block (int)>" <<
" <point count (int)> <random seed (int)>" << std::endl;
std::cerr << "desired block count = Minimum number of blocks to create" << std::endl;
std::cerr << "thread count per block = Number of threads for each block" << std::endl;
std::cerr << "point count = Number of points to be generated for the traveling salesman problem" << std::endl;
std::cerr << "random seed = random seed used to generate the distances between points" << std::endl;
return 1;
}
catch (const std::runtime_error& e) {
std::cerr << e.what() << std::endl;
freeDeviceMemory(devicePointers);
return 1;
}
} | fd4220424417b55f6615b812ae3def6a883cc5dc.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdexcept>
#include <string>
#include <random>
#include <vector>
#include <utility>
#include <cstddef>
#include <cfloat>
#include <chrono>
namespace {
struct Metadata {
int desiredBlockCount; // minimum number of blocks to create
int threadCountPerBlock; // threads per block
int pointCount; // number of points for the traveling salesman to visit
int randomSeed; // random seed used to generate the distance matrix
int threadCount; // actual number of generated threads
int prefixSize; // number of leading points held fixed per thread (not permuted)
};
struct DevicePointers {
float* distanceMatrix = 0;
// memory block containing the best result permutations found by each thread (device)
int* allThreadsBestResultPermutations = 0;
// array containing the best distance found by each thread (device)
float* allThreadsBestResults = 0;
// memory block containing the current permutation state threads are operating on
int* allThreadsPermutations = 0;
// array containing the temp state while executing Heap's algorithm
int* allThreadsC = 0;
};
Metadata parseArgs(int argc, char** argv) {
int desiredBlockCount;
int threadCountPerBlock;
int pointCount;
int randomSeed;
int threadCount;
int prefixSize;
// validate user input
if (argc != 5) {
throw std::invalid_argument("Error: Incorrect number of arguments");
}
try {
desiredBlockCount = std::stoi(argv[1]);
if (desiredBlockCount < 1) {
throw std::invalid_argument("Error: Please set block count to a value greater than 0");
}
threadCountPerBlock = std::stoi(argv[2]);
if (threadCountPerBlock < 1 || threadCountPerBlock > 1024) {
throw std::invalid_argument("Error: Please set thread count per block to a value between 1 and 1024");
}
pointCount = std::stoi(argv[3]);
if (pointCount < 2) {
throw std::invalid_argument("Error: Please set point count to a value greater than 1");
}
randomSeed = std::stoi(argv[4]);
}
catch (const std::out_of_range& e) {
std::cout << e.what() << std::endl;
throw std::invalid_argument("Error: A parameter value was out of range");
}
// calculate thread count and prefix size
([&] {
int desiredThreadCount = desiredBlockCount * threadCountPerBlock;
threadCount = 1;
prefixSize = 0;
for (int i = pointCount; (i > 0 && threadCount < desiredThreadCount); i--) {
threadCount *= i;
prefixSize++;
}
})();
Metadata m{
desiredBlockCount,
threadCountPerBlock,
pointCount,
randomSeed,
threadCount,
prefixSize
};
return m;
}
std::vector<std::vector<float>> createDistanceMatrix(const Metadata& metadata) {
std::default_random_engine generator(metadata.randomSeed);
std::uniform_real_distribution<float> distribution(0.0f, 1000.0f);
std::vector<std::vector<float>> matrix;
matrix.reserve(metadata.pointCount);
for (int i = 0; i < metadata.pointCount; i++) {
std::vector<float> row;
row.reserve(metadata.pointCount);
for (int i = 0; i < metadata.pointCount; i++) {
row.push_back(distribution(generator));
}
matrix.push_back(std::move(row));
}
return matrix;
}
void printDistanceMatrix(const std::vector<std::vector<float>>& matrix) {
std::cout << "Distance Matrix:" << std::endl;
std::cout << "------" << "\t";
for (int i = 0; i < matrix.size(); i++) {
std::cout << i << ":\t";
}
std::cout << std::endl;
int counter = 0;
for (auto& row : matrix) {
std::cout << counter++ << ":\t";
for (auto& val : row) {
std::cout << val << "\t";
}
std::cout << std::endl;
}
}
DevicePointers allocateDeviceMemory(const Metadata& metadata) {
cudaError_t cudaStatus;
DevicePointers d;
// since metadata.threadCount is probably not always divisible by metadata.threadCountPerBlock,
// some "overhead" memory needs to be allocated to not cause segfaults when coalescing memory
int actualThreadCount = ([&] {
int result = metadata.threadCount / metadata.threadCountPerBlock;
result += metadata.threadCount % metadata.threadCountPerBlock == 0 ? 0 : 1;
result *= metadata.threadCountPerBlock;
return result;
})();
std::size_t size = metadata.pointCount * metadata.pointCount * sizeof(float);
cudaStatus = cudaMalloc(&d.distanceMatrix, size);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * metadata.pointCount * sizeof(int);
cudaStatus = cudaMalloc(&d.allThreadsBestResultPermutations, size);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * sizeof(float);
cudaStatus = cudaMalloc(&d.allThreadsBestResults, size);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * metadata.pointCount * sizeof(int);
cudaStatus = cudaMalloc(&d.allThreadsPermutations, size);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error allocating memory!");
}
size = actualThreadCount * (metadata.pointCount - metadata.prefixSize) * sizeof(int);
cudaStatus = cudaMalloc(&d.allThreadsC, size);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error allocating memory!");
}
return d;
}
void freeDeviceMemory(DevicePointers& d) {
cudaFree(d.distanceMatrix);
cudaFree(d.allThreadsBestResultPermutations);
cudaFree(d.allThreadsBestResults);
cudaFree(d.allThreadsPermutations);
cudaFree(d.allThreadsC);
d.distanceMatrix = 0;
d.allThreadsBestResultPermutations = 0;
d.allThreadsBestResults = 0;
d.allThreadsPermutations = 0;
d.allThreadsC = 0;
}
void copyDataHostToDevice(const DevicePointers& d, const std::vector<std::vector<float>>& distanceMatrix) {
float* currentPointer = d.distanceMatrix;
cudaError_t cudaStatus;
for (auto& row : distanceMatrix) {
std::size_t count = row.size() * sizeof(float);
cudaStatus = cudaMemcpy(currentPointer, row.data(), count, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error copying distance matrix onto device!");
}
currentPointer += row.size();
}
}
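/*
 * Length of the closed tour described by `permutation`. The permutation is stored
 * interleaved (element k of this thread lives at permutation[blockDim.x * k]), and the
 * wrap-around edge from the last point back to the first is added before the
 * consecutive edges are summed.
 */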
__device__ float calculatePermutationTotalDistance(float* distanceMatrix, int* permutation, int pointCount) {
float distance = distanceMatrix[permutation[blockDim.x * (pointCount - 1)] * pointCount + permutation[0]];
for (int i = 0; i < pointCount - 1; i++) {
distance += distanceMatrix[permutation[blockDim.x * i] * pointCount + permutation[blockDim.x * (i + 1)]];
}
return distance;
}
__device__ void swap(int& a, int& b) {
int tmp = a;
a = b;
b = tmp;
}
__device__ void copyPermutationToBestArray(int* dest, int* src, int size) {
for (int i = 0; i < size; i++) {
dest[i] = src[blockDim.x * i];
}
}
__device__ void prepareSharedDistanceMatrix(const int id, const int pointCount, const float* global, float* shared) {
int size = pointCount * pointCount;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
shared[i] = global[i];
}
}
__global__ void calculateBestRoute(const int threadCount, const int prefixSize, const int pointCount, const DevicePointers d) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (threadCount <= id) {
return;
}
// initialize permutation
int* permutation = d.allThreadsPermutations + blockIdx.x * blockDim.x * pointCount + threadIdx.x;
for (int i = 0; i < pointCount; i++) {
permutation[blockDim.x * i] = i;
}
int quotient = id;
for (int i = prefixSize - 1; i >= 0; i--) {
int factoradic = quotient % (pointCount - i);
quotient /= pointCount - i;
swap(permutation[blockDim.x * i], permutation[blockDim.x * (factoradic + i)]);
}
// prepare pointers
int* permutable = permutation + blockDim.x * prefixSize;
int permutableSize = pointCount - prefixSize;
int* bestResultPermutations = d.allThreadsBestResultPermutations + id * pointCount;
extern __shared__ float sharedDistanceMatrix[];
prepareSharedDistanceMatrix(id, pointCount, d.distanceMatrix, sharedDistanceMatrix);
__syncthreads();
// initial solution is initial best
copyPermutationToBestArray(bestResultPermutations, permutation, pointCount);
float bestResult = calculatePermutationTotalDistance(sharedDistanceMatrix, permutation, pointCount);
if (permutableSize != 0) {
int* c = d.allThreadsC + blockIdx.x * blockDim.x * permutableSize + threadIdx.x;
for (int i = 0; i < permutableSize; i++) {
c[blockDim.x * i] = 0;
}
int i = 0;
while (i < permutableSize) {
if (c[blockDim.x * i] < i) {
if (i % 2 == 0) {
swap(permutable[0], permutable[blockDim.x * i]);
}
else {
swap(permutable[blockDim.x * c[blockDim.x * i]], permutable[blockDim.x * i]);
}
float tempResult = calculatePermutationTotalDistance(sharedDistanceMatrix, permutation, pointCount);
if (tempResult < bestResult) {
copyPermutationToBestArray(bestResultPermutations, permutation, pointCount);
bestResult = tempResult;
}
c[blockDim.x * i]++;
i = 0;
}
else {
c[blockDim.x * i] = 0;
i++;
}
}
}
d.allThreadsBestResults[id] = bestResult;
}
void executeThreads(const Metadata& metadata, const DevicePointers& d) {
int blockCount = ([&] {
int result = metadata.threadCount / metadata.threadCountPerBlock;
result += metadata.threadCount % metadata.threadCountPerBlock == 0 ? 0 : 1;
return result;
})();
std::size_t sharedMemorySize = metadata.pointCount * metadata.pointCount * sizeof(float);
calculateBestRoute<<<blockCount, metadata.threadCountPerBlock, sharedMemorySize>>>(metadata.threadCount, metadata.prefixSize, metadata.pointCount, d);
}
void copyDataDeviceToHostAndEvaluate(const Metadata& metadata, const DevicePointers& d) {
// copy data
cudaError_t cudaStatus;
std::vector<std::vector<int>> allThreadsBestResultPermutations = ([&] {
std::vector<std::vector<int>> result;
result.reserve(metadata.threadCount);
int* tmp = new int[metadata.threadCount * metadata.pointCount];
std::size_t size = metadata.threadCount * metadata.pointCount * sizeof(int);
cudaStatus = cudaMemcpy(tmp, d.allThreadsBestResultPermutations, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error copying data from device to host");
}
int* intervalStart = tmp;
int* intervalEnd = tmp + metadata.pointCount;
for (int i = 0; i < metadata.threadCount; i++) {
result.push_back(std::vector<int>(intervalStart, intervalEnd));
intervalStart += metadata.pointCount;
intervalEnd += metadata.pointCount;
}
delete[] tmp;
return result;
})();
std::vector<float> allThreadsBestResults = ([&] {
float* tmp = new float[metadata.threadCount];
std::size_t size = metadata.threadCount * sizeof(float);
cudaStatus = cudaMemcpy(tmp, d.allThreadsBestResults, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
throw std::runtime_error("Error copying data from device to host");
}
std::vector<float> result(tmp, tmp + metadata.threadCount);
delete[] tmp;
return result;
})();
// evaluate
float bestResult = FLT_MAX;
std::vector<int>* bestResultPermutation = 0;
for (int i = 0; i < allThreadsBestResults.size(); i++) {
if (allThreadsBestResults[i] < bestResult) {
bestResult = allThreadsBestResults[i];
bestResultPermutation = &allThreadsBestResultPermutations[i];
}
}
std::cout << "Best Route:" << std::endl;
for (int i = 0; i < bestResultPermutation->size(); i++) {
std::cout << bestResultPermutation->at(i) << " -> ";
}
std::cout << bestResultPermutation->at(0) << std::endl;
std::cout << "Route Length: " << bestResult << std::endl;
}
}
int main(int argc, char** argv) {
DevicePointers devicePointers;
try {
std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now();
const Metadata METADATA = parseArgs(argc, argv);
std::vector<std::vector<float>> distanceMatrix = createDistanceMatrix(METADATA);
printDistanceMatrix(distanceMatrix);
devicePointers = allocateDeviceMemory(METADATA);
copyDataHostToDevice(devicePointers, distanceMatrix);
std::chrono::steady_clock::time_point t_threadStart = std::chrono::steady_clock::now();
executeThreads(METADATA, devicePointers);
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point t_threadEnd = std::chrono::steady_clock::now();
copyDataDeviceToHostAndEvaluate(METADATA, devicePointers);
freeDeviceMemory(devicePointers);
cudaDeviceReset();
std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now();
std::chrono::duration<double> totalExecutionTime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end - t_start);
std::chrono::duration<double> threadExecutionTime = std::chrono::duration_cast<std::chrono::duration<double>>(t_threadEnd - t_threadStart);
std::cout << "Total execution time (s): " << totalExecutionTime.count() << std::endl;
std::cout << "Thread execution time (s): " << threadExecutionTime.count() << std::endl;
}
catch (const std::invalid_argument& e) {
std::cerr << e.what() << std::endl;
std::cerr << "Usage: " << argv[0] << "<desired block count (int)> <thread count per block (int)>" <<
" <point count (int)> <random seed (int)>" << std::endl;
std::cerr << "desired block count = Minimum number of blocks to create" << std::endl;
std::cerr << "thread count per block = Number of threads for each block" << std::endl;
std::cerr << "point count = Number of points to be generated for the traveling salesman problem" << std::endl;
std::cerr << "random seed = random seed used to generate the distances between points" << std::endl;
return 1;
}
catch (const std::runtime_error& e) {
std::cerr << e.what() << std::endl;
freeDeviceMemory(devicePointers);
return 1;
}
} |
573523b6927477d63e2d17e22a6641fc91e0c2e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "N3LDG_cuda.h"
#include <array>
#include <boost/format.hpp>
#include <cstdlib>
#include <cstddef>
#include <vector>
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <hipblas.h>
#include "Printf_cuda.cuh"
#include "Printf_cuda.cu"
#include "Memory_cuda.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "cnmem.h"
#include <string>
#include <utility>
#include <cstring>
#include <cstdint>
#include <chrono>
#include <thread>
#include <numeric>
#include <memory>
#include "profiler.h"
#include "Memory_cuda.h"
#include "MyTensor-def.h"
namespace n3ldg_cuda {
using namespace std;
using boost::format;
#if USE_FLOAT
#define cuda_sqrt(x) sqrtf(x)
#define cuda_pow(x, y) powf(x, y)
#define cuda_tanh(x) tanhf(x)
#define cuda_exp(x) __expf(x)
#define cuda_log(x) logf(x)
#else
#define cuda_sqrt(x) sqrt(x)
#define cuda_pow(x, y) pow(x, y)
#define cuda_tanh(x) tanh(x)
#define cuda_exp(x) exp(x)
#define cuda_log(x) log(x)
#endif
#define KERNEL_LOG
#ifdef KERNEL_LOG
#define KernelPrintLine(format, ...)\
{\
cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\
blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\
}
#else
#define KernelPrintLine(format, ...)
#endif
constexpr int TPB = 1024;
constexpr int BLOCK_COUNT = 56;
void CallCuda(hipError_t status) {
if (status != hipSuccess) {
cerr << "cuda error:" << hipGetErrorString(status) << endl;
abort();
}
}
void CheckCudaError() {
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::cerr << "cuda error:" << hipGetErrorName(error) << std::endl;
std::cerr << "cuda error:" << hipGetErrorString(error) << std::endl;
abort();
}
}
void CallCnmem(cnmemStatus_t status) {
assert(status == CNMEM_STATUS_SUCCESS);
}
void CallCublas(hipblasStatus_t status) {
assert(status == HIPBLAS_STATUS_SUCCESS);
}
void CallCurand(hiprandStatus_t status) {
assert(status == HIPRAND_STATUS_SUCCESS);
}
hipblasHandle_t& GetCublasHandle() {
static hipblasHandle_t handle;
static bool init;
if (!init) {
init = true;
CallCublas(hipblasCreate(&handle));
}
return handle;
}
hipError_t MyCudaMemcpy(void *dest, const void *src, size_t count,
hipMemcpyKind kind) {
hipError_t e;
e = hipMemcpyAsync(dest, src, count, kind);
CallCuda(e);
return e;
}
void NumberPointerArray::init(dtype **host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*),
hipMemcpyHostToDevice));
this->len = len;
}
NumberPointerArray::~NumberPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
int NextTwoIntegerPowerNumber(int number) {
int result = 1;
while (number > result) {
result <<= 1;
}
return result;
}
void NumberPointerPointerArray::init(dtype ***host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*),
hipMemcpyHostToDevice));
this->len = len;
}
NumberPointerPointerArray::~NumberPointerPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void NumberArray::init(int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype)));
this->len = len;
}
void NumberArray::init(dtype *host_arr, int len) {
init(len);
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype),
hipMemcpyHostToDevice));
}
NumberArray::~NumberArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void DeviceInt::init() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int)));
}
void DeviceInt::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(&v, value, sizeof(int), hipMemcpyDeviceToHost));
}
void DeviceInt::copyFromHostToDevice() {
CallCuda(MyCudaMemcpy(value, &v, sizeof(int), hipMemcpyHostToDevice));
}
DeviceInt::~DeviceInt() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void DeviceNumber::init() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
    CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(dtype)));
}
void DeviceNumber::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), hipMemcpyDeviceToHost));
}
DeviceNumber::~DeviceNumber() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void IntPointerArray::init(int **host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*),
hipMemcpyHostToDevice));
this->len = len;
}
IntPointerArray::~IntPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void IntArray::init(int *host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int),
hipMemcpyHostToDevice));
this->len = len;
}
void IntArray::init(int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int)));
this->len = len;
}
IntArray::~IntArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void BoolArray::init(bool *host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool),
hipMemcpyHostToDevice));
this->len = len;
}
void BoolArray::copyFromHost(bool *host_arr) {
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool),
hipMemcpyHostToDevice));
}
void BoolArray::copyToHost(bool *host_arr) {
CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool),
hipMemcpyDeviceToHost));
}
BoolArray::~BoolArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor1D::init(int dim) {
initOnDevice(dim);
#if TEST_CUDA
v = new dtype[dim];
zero();
#endif
}
void Tensor1D::initOnMemoryAndDevice(int dim) {
initOnDevice(dim);
v = new dtype[dim];
zero();
}
void Tensor1D::initOnDevice(int dim) {
CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype)));
this->dim = dim;
}
Tensor1D::Tensor1D(const Tensor1D &t) {
    // allocate host and device storage before copying from t
    initOnMemoryAndDevice(t.dim);
    memcpy(v, t.v, dim * sizeof(dtype));
    CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), hipMemcpyDeviceToDevice));
}
Tensor1D::~Tensor1D() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor1D::print() const {
cout << "dim:" << dim << endl;
PrintNums(value, dim);
}
void Tensor1D::copyFromHostToDevice() {
assert(v != NULL);
assert(value != NULL);
CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), hipMemcpyHostToDevice));
}
void Tensor1D::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), hipMemcpyDeviceToHost));
}
void Tensor2D::initOnMemoryAndDevice(int row, int col) {
initOnDevice(row, col);
v = new dtype[row * col];
zero();
}
void Tensor2D::init(int row, int col) {
initOnDevice(row, col);
#if TEST_CUDA
v = new dtype[row * col];
zero();
#endif
}
void Tensor2D::initOnDevice(int row, int col) {
CallCuda(MemoryPool::Ins().Malloc((void**)&value,
row * col * sizeof(dtype)));
this->row = row;
this->col = col;
this->size = row * col;
}
Tensor2D::Tensor2D(const Tensor2D &t) {
    // allocate host and device storage before copying from t
    initOnMemoryAndDevice(t.row, t.col);
    memcpy(v, t.v, sizeof(dtype) * row * col);
    CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col,
                hipMemcpyDeviceToDevice));
}
Tensor2D::~Tensor2D() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor2D::print() const {
cout << "row:" << row << " col:" << col << endl;
PrintNums(value, size);
}
void Tensor2D::copyFromHostToDevice() {
CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), hipMemcpyHostToDevice));
}
void Tensor2D::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), hipMemcpyDeviceToHost));
}
void Assert(bool v, const std::string &message, const function<void(void)> &call) {
#if TEST_CUDA
if (!v) {
std::cerr << message << std::endl;
call();
abort();
}
#endif
}
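// Atomic float add built from atomicExch (avoids relying on a native atomicAdd for
// dtype): repeatedly swap the slot out with 0, add the pending contribution, and swap
// the sum back; if another thread deposited a value in the meantime, the displaced
// value becomes the new pending contribution and the loop retries.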
__device__ void DeviceAtomicAdd(dtype* address, dtype value) {
float old = value;
float new_old;
do {
new_old = atomicExch(address, 0.0);
new_old += old;
} while ((old = atomicExch(address, new_old))!=0.0);
};
__device__ dtype cuda_dtanh(dtype y) {
return 1.0f - y * y;
}
__device__ dtype cuda_sigmoid(dtype x) {
return 1.0f / (1.0f + cuda_exp(-x));
}
__device__ dtype cuda_dsigmoid(dtype y) {
return y * (1.0f - y);
}
__device__ dtype cuda_relu(dtype x) {
return x > 0.0f ? x : 0.0f;
}
__device__ dtype cuda_drelu(dtype x) {
return x > 0.0f ? 1 : 0.0f;
}
__device__ dtype cuda_leaky_relu(dtype x) {
return x > 0.0f ? x : -0.1f * x;
}
__device__ dtype cuda_dleaky_relu(dtype x) {
return x > 0.0f ? 1.0f : -0.1f;
}
const dtype SELU_LAMBDA = 1.0507009873554804934193349852946;
const dtype SELU_ALPHA = 1.6732632423543772848170429916717;
__device__ dtype cuda_selu(dtype x) {
return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) :
SELU_LAMBDA * x;
}
__device__ dtype cuda_dselu(dtype x, dtype y) {
return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA;
}
void Random(dtype *v, int len, dtype bound) {
dtype *mem = (dtype*)malloc(len * sizeof(dtype));
assert(mem != NULL);
dtype min = -bound, max = bound;
for (int i = 0; i < len; i++) {
mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min;
}
CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), hipMemcpyHostToDevice));
free(mem);
}
__device__ int DeviceDefaultIndex() {
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int DeviceDefaultStep() {
return gridDim.x * blockDim.x;
}
__device__ dtype DeviceAbs(dtype d) {
return d > 0 ? d : -d;
}
int DefaultBlockCount(int len) {
int block_count = (len - 1 + TPB) /
TPB;
return ::min(block_count, BLOCK_COUNT);
}
int DefaultBlockCountWithoutLimit(int len) {
return (len - 1 + TPB) / TPB;
}
__global__ void KernelZero(dtype *v, int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len) {
return;
}
v[index] = 0;
}
void Zero(dtype *v, int len) {
int block_count = (len - 1 + TPB) /
TPB;
hipLaunchKernelGGL(( KernelZero), dim3(block_count), dim3(TPB), 0, 0, v, len);
CheckCudaError();
}
__global__ void PrintPointers(void **p, int len) {
for (int i = 0; i < len; ++i) {
printf("%p\n", p[i]);
}
}
__global__ void KernelPrintNums(const dtype* p, int len) {
for (int i = 0; i < len; ++i) {
printf("%d %f\n", i, p[i]);
}
}
void PrintNums(const dtype* p, int len) {
hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, len);
hipDeviceSynchronize();
CheckCudaError();
}
__global__ void KernelPrintNums(const dtype *const *p, int index, int len) {
for (int i = 0; i < len; ++i) {
printf("%d %f\n", i, p[index][i]);
}
}
void PrintNums(const dtype *const *p, int count_i, int len) {
hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, count_i, len);
hipDeviceSynchronize();
CheckCudaError();
}
__global__ void KernelPrintInts(const int* p, int len) {
for (int i = 0; i < len; ++i) {
printf("%d\n", p[i]);
}
}
void PrintInts(const int* p, int len) {
hipLaunchKernelGGL(( KernelPrintInts), dim3(1), dim3(1), 0, 0, p, len);
hipDeviceSynchronize();
CheckCudaError();
}
void InitCuda(int device_id, float memory_in_gb) {
std::cout << "device_id:" << device_id << std::endl;
CallCuda(hipSetDeviceFlags(hipDeviceMapHost));
#if DEVICE_MEMORY == 0
cnmemDevice_t device;
device.size = 10000000000;
device.device = device_id;
cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT);
#else
CallCuda(hipSetDevice(device_id));
#endif
CallCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
CallCuda(cudaPrintfInit());
MemoryPool::Ins().Init(memory_in_gb);
}
void EndCuda() {
cudaPrintfEnd();
Profiler::Ins().Print();
}
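// Scatters a contiguous [count x len] device buffer into `count` separate per-node
// arrays (dest[c][k] = src[c * len + k]); the inverse gather is
// KernelCopyFromMultiVectorsToOneVector below. CopyFromHostToDevice and
// CopyFromDeviceToHost use this pair to move batched per-node values through one
// flat buffer.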
__global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src,
dtype **dest, int count, int len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * len; i += step) {
int count_i = i / len;
int len_i = i % len;
dest[count_i][len_i] = src[i];
}
}
void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals,
int count,
int len) {
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
int block_count = (len * count - 1 + TPB) / TPB;
block_count = ::min(block_count, BLOCK_COUNT);
hipLaunchKernelGGL(( KernelCopyFromOneVectorToMultiVectors), dim3(block_count), dim3(TPB), 0, 0, src,
val_arr.value, count, len);
CheckCudaError();
}
void CopyFromHostToDevice(const std::vector<dtype*> &src,
std::vector<dtype*> &dest, int count, int dim) {
dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype));
if (long_src == NULL) {
std::cerr << "out of memory!" << std::endl;
abort();
}
for (int i = 0; i < count; ++i) {
memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype));
}
dtype *long_dest = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest,
                count * dim * sizeof(dtype)));
    CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype),
                hipMemcpyHostToDevice));
CopyFromOneVectorToMultiVals(long_dest, dest, count, dim);
free(long_src);
CallCuda(MemoryPool::Ins().Free(long_dest));
}
__global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count,
int len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * len; i += step) {
int count_i = i / len;
int len_i = i % len;
dest[i] = src[count_i][len_i];
}
}
void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src,
dtype *dest,
int count,
int len) {
NumberPointerArray src_arr;
src_arr.init((dtype**)src.data(), src.size());
int block_count = DefaultBlockCount(len * count);
hipLaunchKernelGGL(( KernelCopyFromMultiVectorsToOneVector), dim3(block_count), dim3(TPB), 0, 0,
(const dtype**)src_arr.value, dest, count, len);
CheckCudaError();
}
void CopyFromDeviceToHost(const std::vector<dtype*> &src,
std::vector<dtype*> &dest, int count, int dim) {
dtype *long_src = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&long_src,
                count * dim * sizeof(dtype)));
CopyFromMultiVectorsToOneVector(src, long_src, count, dim);
dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype));
if (long_dest == NULL) {
std::cerr << "out of memory!" << std::endl;
abort();
}
CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype),
hipMemcpyDeviceToHost));
for (int i = 0; i < count; ++i) {
memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype));
}
CallCuda(MemoryPool::Ins().Free(long_src));
free(long_dest);
}
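// Applies the selected activation to a flat [count x len] input and writes each result
// twice: into the per-node output pointers (dest) and into a contiguous buffer (dest2),
// presumably so downstream batched kernels can keep working on the flat layout.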
__global__ void KernelActivated(ActivatedEnum activated, const dtype *src,
dtype**dest,
dtype* dest2,
int count,
int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int i = index; i < len * count; i += step) {
int count_i = i / len;
int len_i = i % len;
dtype result;
if (activated == ActivatedEnum::TANH) {
result = cuda_tanh(src[i]);
} else if (activated == ActivatedEnum::SIGMOID) {
result = cuda_sigmoid(src[i]);
} else if (activated == ActivatedEnum::RELU) {
result = cuda_relu(src[i]);
} else if (activated == ActivatedEnum::LEAKY_RELU) {
result = cuda_leaky_relu(src[i]);
} else if (activated == ActivatedEnum::SELU) {
result = cuda_selu(src[i]);
} else {
printf("KernelActivated error\n");
return;
}
dest[count_i][len_i] = result;
dest2[i] = result;
}
}
void Activated(ActivatedEnum activated, const dtype *src,
const std::vector<dtype*>& dest,
dtype *dest2,
int len) {
int count = dest.size();
NumberPointerArray dest_arr;
dest_arr.init((dtype**)dest.data(), dest.size());
int block_count = ::min((len * count - 1 + TPB) / TPB, BLOCK_COUNT);
hipLaunchKernelGGL(( KernelActivated), dim3(block_count), dim3(TPB), 0, 0, activated, src, dest_arr.value, dest2, count, len);
CheckCudaError();
}
__global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs,
int count,
int dim,
dtype**ys) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (activated == ActivatedEnum::TANH) {
ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]);
} else if (activated == ActivatedEnum::SIGMOID) {
ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]);
} else {
printf("error\n");
}
}
}
void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs,
int count,
int dim,
std::vector<dtype*> &ys) {
NumberPointerArray x_arr, y_arr;
x_arr.init((dtype**)xs.data(), xs.size());
y_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelTanhForward), dim3(block_count), dim3(TPB), 0, 0, activated,
(const dtype**)x_arr.value, count, dim, y_arr.value);
CheckCudaError();
}
__global__ void KernelTanhBackward(ActivatedEnum activated,
const dtype **losses,
const dtype **vals,
int count,
int dim,
dtype** in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype v = 0.0f;
if (activated == ActivatedEnum::TANH) {
v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] *
vals[count_i][dim_i]);
} else if (activated == ActivatedEnum::SIGMOID) {
v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) *
vals[count_i][dim_i];
}
DeviceAtomicAdd(in_losses[count_i] + dim_i, v);
}
}
void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses,
const std::vector<dtype*> &vals,
int count,
int dim,
std::vector<dtype*> &in_losses) {
NumberPointerArray loss_arr, val_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelTanhBackward), dim3(block_count), dim3(TPB), 0, 0, activated ,(const dtype**)loss_arr.value,
(const dtype**)val_arr.value, count, dim, in_loss_arr.value);
CheckCudaError();
}
__global__ void KernelDropoutForward(const dtype** xs, int count, int dim,
bool is_training,
const dtype* drop_mask,
dtype drop_factor,
dtype**ys) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (is_training) {
if (drop_mask[i] < drop_factor) {
ys[count_i][dim_i] = 0.0f;
} else {
ys[count_i][dim_i] = xs[count_i][dim_i];
}
} else {
ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i];
}
}
}
void DropoutForward(const std::vector<dtype*> &xs, int count, int dim,
bool is_training,
const dtype *drop_mask,
dtype drop_factor,
std::vector<dtype*> &ys) {
if (drop_factor < 0 || drop_factor >= 1.0f) {
std::cerr << "drop value is " << drop_factor << std::endl;
abort();
}
NumberPointerArray x_arr, y_arr;
x_arr.init((dtype**)xs.data(), xs.size());
y_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelDropoutForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value,
count, dim, is_training, drop_mask, drop_factor, y_arr.value);
CheckCudaError();
}
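// Dropout convention used above: during training an element is zeroed when its mask
// value falls below drop_factor (the mask is expected to hold uniform random numbers,
// e.g. produced by CalculateDropoutMask defined further below); at inference the
// activations are scaled by (1 - drop_factor) instead. A minimal sketch, assuming
// `mask` is a device buffer of count * dim dtypes:
//
//   CalculateDropoutMask(drop_factor, count, dim, mask);
//   DropoutForward(xs, count, dim, /*is_training=*/true, mask, drop_factor, ys);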
__global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals,
int count,
int dim,
bool is_training,
const dtype* drop_mask,
dtype drop_factor,
dtype** in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (is_training) {
if (drop_mask[i] >= drop_factor) {
DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]);
}
} else {
DeviceAtomicAdd(in_losses[count_i] + dim_i,
(1 - drop_factor) * losses[count_i][dim_i]);
}
}
}
void DropoutBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &vals,
int count,
int dim,
bool is_training,
const dtype *drop_mask,
dtype drop_factor,
std::vector<dtype*> &in_losses) {
if (drop_factor < 0 || drop_factor >= 1) {
std::cerr << "drop value is " << drop_factor << std::endl;
abort();
}
NumberPointerArray loss_arr, val_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelDropoutBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value,
(const dtype**)val_arr.value, count, dim, is_training, drop_mask, drop_factor,
in_loss_arr.value);
CheckCudaError();
}
__global__ void KernelBucketForward(const dtype *input, int count, int dim, dtype **ys) {
int index = DeviceDefaultIndex();
for (int i = index; i < count * dim; i+= DeviceDefaultStep()) {
int count_i = i / dim;
int dim_i = i % dim;
ys[count_i][dim_i] = input[count_i * dim + dim_i];
}
}
void BucketForward(const std::vector<dtype> input, int count, int dim, std::vector<dtype*> &ys) {
NumberArray input_arr;
NumberPointerArray ys_arr;
input_arr.init((dtype*)input.data(), input.size());
ys_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelBucketForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype*)input_arr.value, count, dim,
ys_arr.value);
CheckCudaError();
}
__global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b,
dtype* xs_dest,
dtype* b_dest,
int count,
int x_len,
int b_len,
bool use_b) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
int x_total_len = count * x_len;
int b_total_len = count * b_len;
for (int i = index; i < x_total_len + b_total_len; i += step) {
if (i < x_total_len) {
int count_i = i / x_len;
int len_i = i % x_len;
xs_dest[i] = xs[count_i][len_i];
} else if (use_b) {
int b_i = i - x_total_len;
int len_i = b_i % b_len;
b_dest[b_i] = b[len_i];
}
}
}
void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b,
dtype* xs_dest,
dtype* b_dest,
int count,
int x_len,
int b_len,
bool use_b) {
NumberPointerArray x_arr;
x_arr.init((dtype**)xs.data(), xs.size());
int len = x_len + b_len;
int block_count = ::min((count * len - 1 + TPB) / TPB, 56);
hipLaunchKernelGGL(( KernelCopyForUniNodeForward), dim3(block_count), dim3(TPB), 0, 0,
(const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest,
count, x_len, b_len, use_b);
CheckCudaError();
}
__global__ void KernelCopyForBiNodeForward(const dtype **x1s,
const dtype **x2s,
const dtype *b,
dtype *x1s_dest,
dtype *x2s_dest,
dtype *b_dest,
int count,
int x1_len,
int x2_len,
bool use_b,
int b_len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int x1_total_len = count * x1_len;
int x2_total_len = count * x2_len;
int b_total_len = use_b ? count * b_len : 0;
int total_len = x1_total_len + x2_total_len + b_total_len;
for (int i = index; i < total_len; i += step) {
if (i < x2_total_len) {
int len_i = i % x2_len;
int count_i = i / x2_len;
x2s_dest[i] = x2s[count_i][len_i];
} else if (i >= x2_total_len && i < x1_total_len + x2_total_len) {
int len_i = (i - x2_total_len) % x1_len;
int count_i = (i - x2_total_len) / x1_len;
x1s_dest[i - x2_total_len] = x1s[count_i][len_i];
} else {
int b_i = (i - x1_total_len - x2_total_len);
int len_i = b_i % b_len;
b_dest[b_i] = b[len_i];
}
}
}
void CopyForBiNodeForward(const std::vector<dtype*>& x1s,
const std::vector<dtype *>& x2s,
const dtype *b,
dtype *x1s_dest,
dtype *x2s_dest,
dtype *b_dest,
int count,
int x1_len,
int x2_len,
bool use_b,
int b_len) {
int len = x1_len + x2_len + b_len;
int block_count = DefaultBlockCount(count * len);
NumberPointerArray x1_arr, x2_arr;
x1_arr.init((dtype**)x1s.data(), x1s.size());
x2_arr.init((dtype**)x2s.data(), x2s.size());
hipLaunchKernelGGL(( KernelCopyForBiNodeForward), dim3(block_count), dim3(TPB), 0, 0,
(const dtype**)x1_arr.value,
(const dtype**)x2_arr.value,
b,
x1s_dest,
x2s_dest,
b_dest,
count,
x1_len,
x2_len,
use_b,
b_len);
CheckCudaError();
}
void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col,
int count, bool useb, bool should_x_transpose,
bool should_W_transpose) {
hipblasHandle_t &handle = GetCublasHandle();
dtype alpha = 1;
dtype beta = useb? 1 : 0;
hipblasOperation_t x_op = should_x_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int ldx = should_x_transpose ? count : col;
hipblasOperation_t W_op = should_W_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int ldw = should_W_transpose ? col : row;
#if USE_FLOAT
CallCublas(hipblasSgemm(handle, W_op, x_op, row, count, col,
&alpha, W, ldw, x, ldx, &beta, y, row));
#else
CallCublas(hipblasDgemm(handle, W_op, x_op, row, count, col,
&alpha, W, ldw, x, ldx, &beta, y, row));
#endif
}
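// Dimension convention for MatrixMultiplyMatrix (cuBLAS/hipBLAS is column-major):
// with both transpose flags false it computes y = W * x (+ y when useb), where
//   W is (row x col) with leading dimension row,
//   x is (col x count) with leading dimension col,
//   y is (row x count) with leading dimension row,
// i.e. each of the count columns of x is one input vector and the matching column of
// y is its output. With useb, beta = 1, so y is expected to already hold the bias,
// presumably the b_dest buffer tiled by CopyForUniNodeForward above.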
__global__ void KernelVerify(dtype *host, dtype *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i += step) {
dtype loss = host[i] - device[i];
if (DeviceAbs(loss) > 0.001 && DeviceAbs(loss) > 0.001 * DeviceAbs(host[i])) {
*success = false;
KernelPrintLine("KernelVerify: host:%f device:%f loss:%f",
host[i],
device[i],
loss);
}
}
}
bool Verify(dtype *host, dtype *device, int len, const char* message) {
NumberArray arr;
arr.init(host, len);
int block_count = DefaultBlockCount(len);
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
hipMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
hipDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
if (!success) {
cout << message << endl;
}
return success;
}
__global__ void KernelVerify(bool *host, bool *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
if (index < len) {
if (host[index] != device[index]) {
*success = false;
printf("KernelVerify %s: host:%d device:%d \n", message,
host[index],
device[index]);
KernelPrintLine("KernelVerify: host:%d device:%d", host[index],
device[index]);
}
}
}
bool Verify(bool *host, bool *device, int len, const char* message) {
BoolArray arr;
arr.init(host, len);
int block_count = (len + TPB - 1) / TPB;
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
hipMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
hipDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
return success;
}
__global__ void KernelVerify(int *host, int *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
if (index < len) {
if (host[index] != device[index]) {
*success = false;
printf("KernelVerify %s: host:%d device:%d \n", message,
host[index],
device[index]);
KernelPrintLine("KernelVerify: host:%d device:%d", host[index],
device[index]);
}
}
}
bool Verify(int *host, int *device, int len, const char* message) {
IntArray arr;
arr.init(host, len);
int block_count = (len + TPB - 1) / TPB;
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
hipMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
hipDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
return success;
}
constexpr int MAX_BLOCK_POWER = 100;
MemoryPool& MemoryPool::Ins() {
static MemoryPool *p;
if (p == NULL) {
p = new MemoryPool;
p->free_blocks_.resize(MAX_BLOCK_POWER + 1);
p->busy_blocks_.reserve(10000);
}
return *p;
}
void appendFreeBlock(const MemoryBlock &memory_block,
vector<map<void*, MemoryBlock>> &free_blocks,
int i,
const unordered_map<void*, MemoryBlock> &busy_blocks) {
if (memory_block.size != (1 << i)) {
cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i <<
endl;
abort();
}
free_blocks.at(i).insert(make_pair(memory_block.p, memory_block));
}
hipError_t MemoryPool::Malloc(void **p, int size) {
assert(*p == NULL);
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("Malloc");
#if DEVICE_MEMORY == 0
CallCnmem(cnmemMalloc(p, size, NULL));
profiler.EndEvent();
return hipSuccess;
#elif DEVICE_MEMORY == 1
hipError_t r = hipMalloc(p, size);
profiler.EndEvent();
return r;
#else
int fit_size = 1;
int n = 0;
while (fit_size < size) {
fit_size <<= 1;
++n;
}
hipError_t status = hipErrorMemoryAllocation;
while (status != hipSuccess) {
if (free_blocks_.at(n).empty()) {
int higher_power = n + 1;
while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) {
++higher_power;
}
if (higher_power > MAX_BLOCK_POWER) {
while (status != hipSuccess) {
status = hipMalloc(p, fit_size);
}
CallCuda(status);
MemoryBlock block(*p, fit_size);
busy_blocks_.insert(std::make_pair(*p, block));
} else {
auto &v = free_blocks_.at(higher_power);
MemoryBlock &to_split = v.rbegin()->second;
int half_size = to_split.size >> 1;
void *half_address = static_cast<void*>(static_cast<char*>(to_split.p) +
half_size);
MemoryBlock low_block(to_split.p, half_size, to_split.buddy),
high_block(half_address, half_size, to_split.p);
v.erase(v.rbegin()->first);
appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_);
appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_);
}
} else {
status = hipSuccess;
MemoryBlock &block = free_blocks_.at(n).rbegin()->second;
*p = block.p;
busy_blocks_.insert(std::make_pair(block.p, block));
free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first);
}
}
profiler.EndEvent();
return status;
#endif
}
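// The DEVICE_MEMORY == 2 path of Malloc above is a buddy allocator: the request is
// rounded up to the next power of two (e.g. a 3000-byte request becomes a 4096-byte
// block, n = 12), the smallest sufficiently large free block is located, and larger
// blocks are split in halves until one of size 1 << n remains; when a block is split,
// the upper half records the lower half's address as its buddy, which is what Free
// uses later to re-merge the pair. If no free block is large enough, a fresh block of
// 1 << n bytes is allocated with hipMalloc.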
std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a,
const MemoryBlock &b) {
if (a.size != b.size) {
cerr << "a.size is not equal to b.size" << endl;
abort();
}
ptrdiff_t distance = static_cast<char*>(a.p) - static_cast<char*>(b.p);
if (distance == 0) {
cerr << "blocks a and b have the same address" << endl;
abort();
}
const MemoryBlock &low = distance > 0 ? b : a;
const MemoryBlock &high = distance > 0 ? a : b;
return std::make_pair(&low, &high);
}
bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) {
if (a.size != b.size) {
return false;
}
auto pair = lowerAndhigherBlocks(a, b);
return pair.second->buddy == pair.first->p &&
((char*)pair.second->p - (char*)pair.first->p) == a.size;
}
MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) {
if (a.size != b.size) {
cerr << "sizes of memory blocks to merge not equal" << endl;
abort();
}
auto pair = lowerAndhigherBlocks(a, b);
if ((char*)pair.second->p - (char*)pair.first->p != a.size ||
(a.p != b.buddy && a.buddy != b.p)) {
cerr << "a and b are not buddies" << endl;
cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl;
abort();
}
MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy);
return block;
}
void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks,
int power,
const unordered_map<void*, MemoryBlock> &busy_blocks) {
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("returnFreeBlock");
MemoryBlock current_block = block;
for (int i = power; i <= MAX_BLOCK_POWER; ++i) {
map<void*, MemoryBlock> &v = free_blocks.at(i);
void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ?
current_block.buddy : (void*)((char*)current_block.p + current_block.size);
auto it = v.find(free_p);
if (it == v.end() || (it->second.p != current_block.buddy &&
it->second.buddy != current_block.p)) {
appendFreeBlock(current_block, free_blocks, i, busy_blocks);
break;
} else {
MemoryBlock merged_block = mergeBlocks(it->second, current_block);
current_block = merged_block;
v.erase(it);
}
}
profiler.EndEvent();
}
hipError_t MemoryPool::Free(void *p) {
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("Free");
#if DEVICE_MEMORY == 0
CallCnmem(cnmemFree(p, NULL));
profiler.EndEvent();
return hipSuccess;
#elif DEVICE_MEMORY == 1
hipError_t r = hipFree(p);
profiler.EndEvent();
return r;
#else
auto it = busy_blocks_.find(p);
if (it == busy_blocks_.end()) {
cerr << "cannot find busy block " << p << endl;
abort();
}
int size = it->second.size;
int n = 0;
while (size > 1) {
size >>= 1;
++n;
}
if (it->second.size != (1 << n)) {
cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl;
abort();
}
auto block = it->second;
busy_blocks_.erase(it);
returnFreeBlock(block, free_blocks_, n, busy_blocks_);
it = busy_blocks_.find(p);
if (it != busy_blocks_.end()) {
cerr << "freed block " << p << " is still in busy_blocks_" << endl;
abort();
}
profiler.EndEvent();
return hipSuccess;
#endif
}
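// Free (DEVICE_MEMORY == 2 path) is the inverse walk: starting from the freed block's
// own power n, returnFreeBlock looks for the block's buddy in the free list of that
// size class; if the buddy is free, the two are merged into a block of size
// 1 << (n + 1) and the search continues one level higher, otherwise the (possibly
// merged) block is parked in the free list at the current level.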
void Profiler::EndCudaEvent() {
//hipDeviceSynchronize();
EndEvent();
}
__global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated,
const dtype *const*ly,
const dtype *ty,
const dtype *y,
dtype *lty,
int count,
int dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = count * dim;
for (int i = index; i < len; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype yi = y[i];
dtype lyv = ly[count_i][dim_i];
if (activated == ActivatedEnum::TANH) {
lty[i] = lyv * cuda_dtanh(yi);
} else if (activated == ActivatedEnum::SIGMOID) {
lty[i] = lyv * cuda_dsigmoid(yi);
} else if (activated == ActivatedEnum::RELU) {
lty[i] = lyv * cuda_drelu(ty[i]);
} else if (activated == ActivatedEnum::LEAKY_RELU) {
lty[i] = lyv * cuda_dleaky_relu(ty[i]);
} else if (activated == ActivatedEnum::SELU) {
lty[i] = lyv * cuda_dselu(ty[i], yi);
} else {
printf("KernelCalculateLtyForUniBackward error\n");
}
}
}
void CalculateLtyForUniBackward(ActivatedEnum activated,
const std::vector<dtype*> &ly,
const dtype *ty,
const dtype *y,
dtype *lty,
int count,
int dim) {
NumberPointerArray ly_arr;
ly_arr.init((dtype**)ly.data(), ly.size());
int block_count = ::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB);
hipLaunchKernelGGL(( KernelCalculateLtyForUniBackward), dim3(block_count), dim3(TPB), 0, 0, activated,
ly_arr.value, ty, y, lty, count, dim);
CheckCudaError();
hipDeviceSynchronize();
}
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward(
const dtype *lty,
const dtype *lx,
dtype *b,
dtype **losses,
int count,
int out_dim,
int in_dim,
dtype *block_sums,
int *global_block_count,
bool use_b) {
__shared__ volatile dtype shared_arr[TPB];
int count_i = blockIdx.y * blockDim.x + threadIdx.x;
int dim_i = blockIdx.x;
if (dim_i < out_dim) {
if (use_b) {
if (threadIdx.x == 0 && blockIdx.y == 0) {
global_block_count[dim_i] = 0;
}
int lty_index = count_i * out_dim + dim_i;
shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
__syncthreads();
for (int i = (TPB >> 1); i > 0; i>>=1) {
if (threadIdx.x < i) {
shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
block_sums[gridDim.y * blockIdx.x + blockIdx.y] =
shared_arr[0];
if (atomicAdd(global_block_count + dim_i, 1) ==
gridDim.y - 1) {
dtype sum = 0.0;
for (int i = 0; i < gridDim.y; ++i) {
sum += block_sums[gridDim.y * blockIdx.x + i];
}
DeviceAtomicAdd(b + dim_i, sum);
}
}
}
} else {
if (count_i < count) {
dim_i -= out_dim;
int lx_index = dim_i + count_i * in_dim;
DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]);
}
}
}
void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty,
const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count,
int out_dim, int in_dim, bool use_b) {
int block_y = (count - 1 + TPB) / TPB;
dim3 block_dim(out_dim + in_dim, block_y, 1);
NumberPointerArray loss_arr;
loss_arr.init(losses.data(), count);
Tensor1D block_sums;
block_sums.init(block_y * out_dim);
IntArray global_block_count_arr;
global_block_count_arr.init(out_dim);
hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward), dim3(block_dim),
dim3(TPB), 0, 0, lty, lx, b, loss_arr.value, count, out_dim, in_dim,
block_sums.value, global_block_count_arr.value, use_b);
CheckCudaError();
}
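// Grid layout of the kernel above: blockIdx.x indexes a column in
// [0, out_dim + in_dim). Columns below out_dim reduce lty over the batch into the
// bias gradient b (only when use_b is set), the remaining columns scatter lx into the
// per-node input losses. The bias reduction is two-stage: each block of TPB threads
// reduces its slice of the batch in shared memory and writes a partial sum into
// block_sums, and the last block to finish per column (tracked via
// global_block_count) adds the partial sums into b. For example, with count = 700 and
// TPB = 512, gridDim.y = 2, so each bias column produces two partial sums.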
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward(
const dtype *lty,
const dtype *lx1,
const dtype *lx2,
dtype *b,
dtype **losses1,
dtype **losses2,
int count,
int out_dim,
int in_dim1,
int in_dim2,
bool use_b,
dtype *block_sums,
int *global_block_count) {
__shared__ volatile dtype shared_arr[TPB];
int count_i = blockIdx.y * blockDim.x + threadIdx.x;
int dim_i = blockIdx.x;
if (dim_i < out_dim) {
if (threadIdx.x == 0 && blockIdx.y == 0) {
global_block_count[dim_i] = 0;
}
//int lty_index = dim_i * count + count_i;
int lty_index = dim_i + count_i * out_dim;
shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
__syncthreads();
for (int i = (TPB >> 1); i > 0; i>>=1) {
if (threadIdx.x < i) {
shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0];
if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) {
dtype sum = 0.0;
for (int i = 0; i < gridDim.y; ++i) {
sum += block_sums[gridDim.y * blockIdx.x + i];
}
if (use_b) {
DeviceAtomicAdd(b + dim_i, sum);
}
}
}
} else if (dim_i < out_dim + in_dim1) {
if (count_i < count) {
dim_i -= out_dim;
int lx_index = dim_i + count_i * in_dim1;
DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]);
}
} else {
if (count_i < count) {
dim_i -= (out_dim + in_dim1);
int lx_index = dim_i + count_i * in_dim2;
DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]);
}
}
}
void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty,
const dtype *lx1,
const dtype *lx2,
dtype *b,
std::vector<dtype*> &losses1,
std::vector<dtype*> &losses2,
int count,
int out_dim,
int in_dim1,
int in_dim2,
bool use_b) {
int block_y = (count - 1 + TPB) / TPB;
dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1);
NumberPointerArray loss1_arr;
loss1_arr.init(losses1.data(), count);
NumberPointerArray loss2_arr;
loss2_arr.init(losses2.data(), count);
Tensor1D block_sums;
block_sums.init(block_y * out_dim);
IntArray global_block_count_arr;
global_block_count_arr.init(out_dim);
hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward), dim3(block_dim),
dim3(TPB), 0, 0, lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count,
out_dim, in_dim1, in_dim2, use_b, block_sums.value,
global_block_count_arr.value);
CheckCudaError();
}
constexpr int MAX_BATCH_COUNT = 1000000;
__global__ void KernelInitCurandStates(hiprandState_t *states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int i = index; i < MAX_BATCH_COUNT; i += step) {
hiprand_init(0, i, 0, &states[i]);
}
}
hiprandState_t *GetCurandStates() {
static hiprandState_t *states;
if (states == NULL) {
MemoryPool &pool = MemoryPool::Ins();
CallCuda(pool.Malloc((void**)&states, sizeof(hiprandState_t) *
MAX_BATCH_COUNT));
hipLaunchKernelGGL(( KernelInitCurandStates), dim3(BLOCK_COUNT), dim3(TPB), 0, 0, states);
CheckCudaError();
}
return states;
}
hiprandGenerator_t &GetGenerator() {
static hiprandGenerator_t gen;
static bool init;
if (!init) {
CallCurand(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
CallCurand(hiprandSetPseudoRandomGeneratorSeed(gen, 0));
init = true;
}
return gen;
}
void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) {
hiprandGenerator_t &gen = GetGenerator();
CallCurand(hiprandGenerateUniform(gen, mask, count * dim));
}
__global__ void KernelConcatForward(dtype **ins, int *in_dims,
dtype **outs,
int count,
int in_count,
int out_dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < out_dim * count; i += step) {
int out_dim_i = i % out_dim;
int count_i = i / out_dim;
int in_dim_sum = 0;
int last_in_dim_sum;
int offset_j = 0;
for (int j = 0; j < in_count; ++j) {
last_in_dim_sum = in_dim_sum;
in_dim_sum += in_dims[j];
offset_j = j;
if (out_dim_i < in_dim_sum) {
break;
}
}
int in_dim_i = out_dim_i - last_in_dim_sum;
dtype v = ins[count_i * in_count + offset_j][in_dim_i];
outs[count_i][out_dim_i] = v;
}
}
void ConcatForward(const std::vector<dtype*> &in_vals,
const std::vector<int> &in_dims,
std::vector<dtype*> &vals,
int count,
int in_count,
int out_dim) {
int len = count * out_dim;
int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
NumberPointerArray in_val_arr, val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_dim_arr;
in_dim_arr.init((int*)in_dims.data(), in_dims.size());
hipLaunchKernelGGL(( KernelConcatForward), dim3(block_count), dim3(TPB), 0, 0, in_val_arr.value,
in_dim_arr.value, val_arr.value, count, in_count, out_dim);
CheckCudaError();
}
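// ConcatForward lays the in_count inputs of every instance end to end: with
// in_dims = {50, 100} and out_dim = 150, output positions [0, 50) of instance i come
// from ins[i * 2 + 0] and positions [50, 150) from ins[i * 2 + 1]; in_vals is
// therefore expected to hold count * in_count pointers grouped by instance.
// ConcatBackward below routes the losses back along exactly the same mapping.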
__global__ void KernelConcatBackward(dtype** in_losses, int *in_dims,
dtype **out_losses,
int count,
int in_count,
int out_dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < out_dim * count; i += step) {
int out_dim_i = i % out_dim;
int count_i = i / out_dim;
int in_dim_sum = 0;
int last_in_dim_sum;
int offset_j = 0;
for (int j = 0; j < in_count; ++j) {
last_in_dim_sum = in_dim_sum;
in_dim_sum += in_dims[j];
offset_j = j;
if (out_dim_i < in_dim_sum) {
break;
}
}
int in_dim_i = out_dim_i - last_in_dim_sum;
DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] +
in_dim_i, out_losses[count_i][out_dim_i]);
}
}
void ConcatBackward(const std::vector<dtype*> &in_losses,
const std::vector<int> &in_dims,
std::vector<dtype*> &losses,
int count,
int in_count,
int out_dim) {
int len = count * out_dim;
int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
NumberPointerArray in_loss_arr, loss_arr;
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray in_dim_arr;
in_dim_arr.init((int*)in_dims.data(), in_dims.size());
hipLaunchKernelGGL(( KernelConcatBackward), dim3(block_count), dim3(TPB), 0, 0, in_loss_arr.value,
in_dim_arr.value, loss_arr.value, count, in_count, out_dim);
CheckCudaError();
}
__global__ void KernelMemset(dtype *p, int len, dtype value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i+= step) {
p[i] = value;
}
}
void Memset(dtype *p, int len, dtype value) {
int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value);
CheckCudaError();
}
__global__ void KernelMemset(bool *p, int len, bool value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i+= step) {
p[i] = value;
}
}
void Memset(bool *p, int len, bool value) {
int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value);
CheckCudaError();
}
void *Malloc(int size) {
void *p;
CallCuda(hipMalloc(&p, size));
return p;
}
__global__ void KernelBatchMemset(dtype **p, int count, int dim, dtype value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count ; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
p[count_i][dim_i] = value;
}
}
void BatchMemset(const std::vector<dtype*> &vec, int count, int dim,
dtype value) {
int block_count = (count * dim -1 + TPB) / TPB;
block_count = ::min(block_count, BLOCK_COUNT);
NumberPointerArray vec_arr;
vec_arr.init((dtype**)vec.data(), vec.size());
hipLaunchKernelGGL(( KernelBatchMemset), dim3(block_count), dim3(TPB), 0, 0, vec_arr.value, count, dim, value);
CheckCudaError();
}
__global__ void KernelLookupForward(const int *xids, const dtype *vocabulary,
int count,
int dim,
dtype **vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int xid = xids[count_i];
if (xid >= 0) {
int voc_i = xid * dim + dim_i;
vals[count_i][dim_i] = vocabulary[voc_i];
} else {
vals[count_i][dim_i] = 0.0f;
}
}
}
void LookupForward(const std::vector<int> &xids, const dtype *vocabulary,
int count,
int dim,
std::vector<dtype*> &vals) {
int block_count = ::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB);
IntArray xid_arr;
xid_arr.init((int*)xids.data(), xids.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
hipLaunchKernelGGL(( KernelLookupForward), dim3(block_count), dim3(TPB), 0, 0, xid_arr.value, vocabulary,
count, dim, const_cast<dtype**>(val_arr.value));
CheckCudaError();
}
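// LookupForward is the embedding lookup: vocabulary is a (vocab_size x dim) row-major
// table on the device and row xids[i] is copied into vals[i]; a negative id yields a
// zero vector. For example, xids = {7, -1} with dim = 100 fills vals[0] from
// vocabulary[700..799] and vals[1] with zeros.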
__global__ void KernelLookupBackward(const int *xids, int unknown_id,
bool fine_tune,
const dtype** losses,
int count,
int dim,
dtype *grad,
bool *indexers) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int xid = xids[count_i];
if (xid == unknown_id || fine_tune) {
assert(xid >= 0);
if (dim_i == 0) {
indexers[xid] = true;
}
DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]);
}
}
}
void LookupBackward(const std::vector<int> &xids, int unknown_id,
bool fine_tune,
const std::vector<dtype*> &losses,
int count,
int dim,
dtype *grad,
bool *indexers) {
int block_count = ::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT);
IntArray xid_arr;
xid_arr.init((int*)xids.data(), xids.size());
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
hipLaunchKernelGGL(( KernelLookupBackward), dim3(block_count), dim3(TPB), 0, 0,
const_cast<const int *>(xid_arr.value),
unknown_id,
fine_tune,
const_cast<const dtype**>(loss_arr.value),
count,
dim,
grad,
indexers);
CheckCudaError();
}
__global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins,
int *in_counts, int max_in_count, dtype **outs, int count, int dim,
int* hit_inputs) {
__shared__ volatile extern dtype pool_shared_arr[];
volatile dtype* shared_indexers = pool_shared_arr + blockDim.x;
int batch_i = blockIdx.y;
int in_count = in_counts[batch_i];
int in_count_i = threadIdx.x;
int dim_i = blockIdx.x;
if (in_count_i < in_count) {
pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count +
in_count_i][dim_i];
} else {
pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ?
-INFINITY : INFINITY;
}
shared_indexers[threadIdx.x] = threadIdx.x;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
if (pooling == PoolingEnum::MAX) {
if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) {
pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
shared_indexers[threadIdx.x] = shared_indexers[plus_i];
}
} else {
if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) {
pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
shared_indexers[threadIdx.x] = shared_indexers[plus_i];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
hit_inputs[batch_i * dim + dim_i] = shared_indexers[0];
outs[batch_i][dim_i] = pool_shared_arr[0];
}
}
void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
std::vector<dtype*> &vals,
int count,
const std::vector<int> &in_counts,
int dim,
int *hit_inputs) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
hipLaunchKernelGGL(( KernelPoolForward), dim3(block_dim), dim3(thread_count), thread_count * 2 *
sizeof(dtype), 0, pooling, in_val_arr.value, in_count_arr.value,
max_in_count, val_arr.value, count, dim, hit_inputs);
CheckCudaError();
}
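// PoolForward reduces a variable number of input vectors per instance (in_counts)
// to a single vector per instance. For MAX pooling the unused thread slots are padded
// with -INFINITY (and with INFINITY otherwise) so they can never win the shared-memory
// reduction; hit_inputs records, for each (instance, dimension), which input supplied
// the winning value, and KernelPoolBackward uses it to route the loss back to exactly
// that input. The launch uses the smallest power of two >= max(in_counts) threads
// (at least 8) per block.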
__global__ void KernelPoolBackward(const dtype ** losses,
const int *hit_inputs,
int max_in_count,
int count,
int dim,
dtype **in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int input_i = hit_inputs[i];
dtype loss = losses[count_i][dim_i];
DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i,
loss);
}
}
void PoolBackward(const std::vector<dtype*> &losses,
std::vector<dtype*> &in_losses,
const std::vector<int> &in_counts,
const int *hit_inputs,
int count,
int dim) {
NumberPointerArray loss_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int block_count = (count * dim - 1 + TPB) / TPB;
block_count = ::min(block_count, BLOCK_COUNT);
hipLaunchKernelGGL(( KernelPoolBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value,
hit_inputs,
max_in_count,
count,
dim,
in_loss_arr.value);
CheckCudaError();
}
__global__ void KernelSumPoolForward(PoolingEnum pooling,
const dtype **in_vals,
int count,
int dim,
const int *in_counts,
int max_in_count,
dtype **vals) {
__shared__ volatile extern dtype pool_shared_arr[];
int batch_i = blockIdx.y;
int in_count = in_counts[batch_i];
int in_count_i = threadIdx.x;
int dim_i = blockIdx.x;
if (in_count_i < in_count) {
pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count +
in_count_i][dim_i];
} else {
pool_shared_arr[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ?
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i];
}
}
void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
int count,
int dim,
const std::vector<int> &in_counts,
std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
hipLaunchKernelGGL(( KernelSumPoolForward), dim3(block_dim), dim3(thread_count),
thread_count * sizeof(dtype), 0, pooling,
(const dtype**)in_val_arr.value, count, dim,
(const int*)in_count_arr.value, max_in_count, val_arr.value);
CheckCudaError();
}
__global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **in_losses) {
int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y;
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
if (blockIdx.y < in_counts[blockIdx.x]) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, pooling == PoolingEnum::SUM ?
losses[blockIdx.x][i] : losses[blockIdx.x][i] / in_counts[blockIdx.x]);
}
}
}
void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses) {
int thread_count = 8;
while (thread_count < dim) {
thread_count <<= 1;
}
thread_count = ::min(TPB, thread_count);
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
dim3 block_dim(count, max_in_count, 1);
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
NumberPointerArray in_loss_arr;
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
hipLaunchKernelGGL(( KernelSumBackward), dim3(block_dim), dim3(thread_count), 0, 0, pooling,
(const dtype**)loss_arr.value, (const int*)in_count_arr.value,
max_in_count, count, dim, in_loss_arr.value);
CheckCudaError();
}
//__global_ void KernelCalculateNormalizedForAttention(const dtype** unnormeds, const int *in_counts,
// int max_in_count,
// int count,
// dtype** normalized_scalars) {
// __shared__ volatile extern dtype shared_arr[];
// int in_count = in_counts[blockIdx.x];
// int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
// dtype exped_value = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f;
// shared_arr[threadIdx.x] = exped_value;
// for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
// if (threadIdx.x < i) {
// int plus_i = threadIdx.x + i;
// shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
// }
// __syncthreads();
// }
// if (threadIdx.x < in_count) {
// normalized_scalars[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
// }
//}
__global__ void KernelScalarAttentionForward(const dtype** ins,
const dtype **unnormeds,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **masks,
dtype **vals) {
__shared__ volatile extern dtype attention_shared_arr[];
volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
int count_i = blockIdx.y;
int in_count = in_counts[count_i];
int dim_i = blockIdx.x;
int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
dtype unnormed_mask = threadIdx.x < in_count ?
cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f;
attention_shared_arr[threadIdx.x] = unnormed_mask;
shared_unnormed_masks[threadIdx.x] = unnormed_mask;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] /
attention_shared_arr[0] : 0.0f;
if (threadIdx.x < in_count) {
masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
}
dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ?
mask * in : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
}
}
void ScalarAttentionForward(const std::vector<dtype*> &ins,
const std::vector<dtype*> &unnormeds,
const std::vector<int> &in_counts, int count, int dim,
std::vector<dtype*> &masks, std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_arr;
in_arr.init((dtype**)ins.data(), ins.size());
NumberPointerArray unnormed_arr;
unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
NumberPointerArray mask_arr;
mask_arr.init((dtype**)masks.data(), masks.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
hipLaunchKernelGGL(( KernelScalarAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count *
sizeof(dtype), 0, (const dtype**)in_arr.value,
(const dtype**)unnormed_arr.value,
(const int*)in_count_arr.value,
max_in_count, count, dim, mask_arr.value, val_arr.value);
CheckCudaError();
}
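// KernelScalarAttentionForward computes, per instance, a softmax over the scalar
// scores unnormeds[j][0] of its in_count inputs,
//   mask_j = exp(u_j) / sum_k exp(u_k),
// and then the attention-weighted sum vals[d] = sum_j mask_j * ins_j[d] for every
// dimension d. The exponentials are not shifted by the maximum score, so very large
// scores can overflow cuda_exp.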
__global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses,
const dtype **in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype *mask_losses,
dtype **in_losses) {
// blockIdx.x : in_count_i
// blockIdx.y : count_i
// threadIdx.x : dim_i
__shared__ extern volatile dtype att_mask_loss_shared_arr[];
int in_count = in_counts[blockIdx.y];
int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
if (in_count <= blockIdx.x) {
return;
}
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] *
masks[blockIdx.y][max_in_count * i + blockIdx.x]);
}
att_mask_loss_shared_arr[threadIdx.x] = 0.0f;
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] *
in_vals[global_in_count_i][i];
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
att_mask_loss_shared_arr[threadIdx.x] +=
att_mask_loss_shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0];
}
}
void ScalarAttentionMaskAndInLoss(const dtype** losses,
const dtype** in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype *mask_losses,
dtype **in_losses) {
dim3 block_dim(max_in_count, count, 1);
int thread_count = 8;
if (dim >= TPB) {
thread_count = TPB;
} else {
while (dim > thread_count) {
thread_count <<= 1;
}
}
hipLaunchKernelGGL(( KernelScalarAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count),
thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts,
max_in_count, count, dim, mask_losses, in_losses);
CheckCudaError();
}
__global__ void KernelScalarAttentionBackward(const dtype** masks,
const dtype *mask_losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **unnormed_losses) {
__shared__ volatile extern dtype shared_att_bckwrd_arr[];
int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
int in_count = in_counts[blockIdx.x];
if (threadIdx.x < in_count && blockIdx.y == 0) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i],
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[global_in_count_i]);
}
shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ?
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[global_in_count_i] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_att_bckwrd_arr[threadIdx.x] +=
shared_att_bckwrd_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x < in_count && blockIdx.y == 0) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i],
-shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]);
}
}
void ScalarAttentionBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals,
const std::vector<dtype*> &masks,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses,
std::vector<dtype*> &unnormed_losses) {
NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr,
in_val_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
mask_arr.init((dtype**)masks.data(), masks.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
unnormed_loss_arr.init((dtype**)unnormed_losses.data(),
unnormed_losses.size());
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
NumberArray mask_loss_arr;
mask_loss_arr.init(count * max_in_count);
ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value,
(const dtype**)in_val_arr.value, (const dtype**)mask_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
mask_loss_arr.value, in_loss_arr.value);
dim3 block_dim(count, dim, 1);
int thread_count = 8;
while (thread_count < max_in_count) {
thread_count <<= 1;
}
hipLaunchKernelGGL(( KernelScalarAttentionBackward), dim3(block_dim), dim3(thread_count),
thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value,
(const dtype*)mask_loss_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
unnormed_loss_arr.value);
CheckCudaError();
}
__global__ void KernelVectorAttentionForward(const dtype** ins,
const dtype **unnormeds,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **masks,
dtype **vals) {
__shared__ volatile extern dtype attention_shared_arr[];
volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
int count_i = blockIdx.y;
int in_count = in_counts[count_i];
int dim_i = blockIdx.x;
int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
dtype unnormed_mask = threadIdx.x < in_count ?
cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f;
attention_shared_arr[threadIdx.x] = unnormed_mask;
shared_unnormed_masks[threadIdx.x] = unnormed_mask;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] /
attention_shared_arr[0] : 0.0f;
if (threadIdx.x < in_count) {
masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
}
dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ?
mask * in : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
}
}
void VectorAttentionForward(const std::vector<dtype*> &ins,
const std::vector<dtype*> &unnormeds,
const std::vector<int> &in_counts, int count, int dim,
std::vector<dtype*> &masks, std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_arr;
in_arr.init((dtype**)ins.data(), ins.size());
NumberPointerArray unnormed_arr;
unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
NumberPointerArray mask_arr;
mask_arr.init((dtype**)masks.data(), masks.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
hipLaunchKernelGGL(( KernelVectorAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count *
sizeof(dtype), 0, (const dtype**)in_arr.value,
(const dtype**)unnormed_arr.value,
(const int*)in_count_arr.value,
max_in_count, count, dim, mask_arr.value, val_arr.value);
CheckCudaError();
}
__global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses,
const dtype **in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **mask_losses,
dtype **in_losses) {
// blockIdx.x : in_count_i
// blockIdx.y : count_i
// threadIdx.x : dim_i
int in_count = in_counts[blockIdx.y];
int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
if (in_count <= blockIdx.x) {
return;
}
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] *
masks[blockIdx.y][max_in_count * i + blockIdx.x]);
mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] =
losses[blockIdx.y][i] * in_vals[global_in_count_i][i];
}
}
void VectorAttentionMaskAndInLoss(const dtype** losses,
const dtype** in_vals,
const dtype** masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **mask_losses,
dtype **in_losses) {
dim3 block_dim(max_in_count, count, 1);
int thread_count = 8;
if (dim >= TPB) {
thread_count = TPB;
} else {
while (dim > thread_count) {
thread_count <<= 1;
}
}
hipLaunchKernelGGL(( KernelVectorAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count),
thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts,
max_in_count, count, dim, mask_losses, in_losses);
CheckCudaError();
}
__global__ void KernelVectorAttentionBackward(const dtype** masks,
const dtype **mask_losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **unnormed_losses) {
__shared__ volatile extern dtype shared_att_bckwrd_arr[];
int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
int in_count = in_counts[blockIdx.x];
if (threadIdx.x < in_count) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y,
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[blockIdx.x][blockIdx.y * max_in_count +
threadIdx.x]);
}
shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ?
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] :
0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_att_bckwrd_arr[threadIdx.x] +=
shared_att_bckwrd_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x < in_count) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y,
-shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y *
max_in_count + threadIdx.x]);
}
}
void VectorAttentionBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals,
const std::vector<dtype*> &masks,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses,
std::vector<dtype*> &unnormed_losses) {
NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr,
in_val_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
mask_arr.init((dtype**)masks.data(), masks.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
unnormed_loss_arr.init((dtype**)unnormed_losses.data(),
unnormed_losses.size());
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
std::vector<std::shared_ptr<NumberArray>> mask_losses;
mask_losses.reserve(count);
for (int i = 0; i < count; ++i) {
std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>();
p->init(max_in_count * dim);
mask_losses.push_back(p);
}
std::vector<dtype*> raw_mask_losses;
raw_mask_losses.reserve(count);
for (auto &p : mask_losses) {
raw_mask_losses.push_back(p->value);
}
NumberPointerArray mask_loss_arr;
mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size());
VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value,
(const dtype**)in_val_arr.value, (const dtype**)mask_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
mask_loss_arr.value, in_loss_arr.value);
dim3 block_dim(count, dim, 1);
int thread_count = 8;
while (thread_count < max_in_count) {
thread_count <<= 1;
}
hipLaunchKernelGGL(( KernelVectorAttentionBackward), dim3(block_dim), dim3(thread_count),
thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value,
(const dtype**)mask_loss_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
unnormed_loss_arr.value);
CheckCudaError();
}
__global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2,
int count,
int dim,
dtype** vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i];
}
}
void PMultiForward(const std::vector<dtype*> &ins1,
const std::vector<dtype*> &ins2,
int count,
int dim,
std::vector<dtype*> &vals) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray ins1_arr, ins2_arr, vals_arr;
ins1_arr.init((dtype**)ins1.data(), count);
ins2_arr.init((dtype**)ins2.data(), count);
vals_arr.init((dtype**)vals.data(), count);
hipLaunchKernelGGL(( KernelPMultiForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)ins1_arr.value,
(const dtype**)ins2_arr.value, count, dim, vals_arr.value);
CheckCudaError();
}
__global__ void KernelDivForward(const dtype *const *numerators, const dtype *const *denominators,
int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = numerators[count_i][dim_i] / denominators[count_i][0];
}
}
void DivForwartd(const vector<const dtype*> numerators, const vector<const dtype*> denominators,
int count,
int dim,
vector<dtype*> &results) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray numerator_arr, denominator_arr, result_arr;
numerator_arr.init((dtype**)numerators.data(), count);
denominator_arr.init((dtype**)denominators.data(), count);
result_arr.init((dtype**)results.data(), count);
hipLaunchKernelGGL(( KernelDivForward), dim3(block_count), dim3(TPB), 0, 0, numerator_arr.value, denominator_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelDivNumeratorBackward(const dtype *const *losses,
const dtype *const *denominator_vals,
int count,
int dim,
dtype *const *numerator_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] /
denominator_vals[count_i][0]);
}
}
__global__ void KernelDivDenominatorBackward(const dtype *const *losses,
const dtype *const *numerator_vals,
const dtype *const *denominator_vals,
int count,
int dim,
dtype *block_sums,
int *block_counters,
dtype *const *denominator_losses) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
__shared__ volatile dtype square;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
int count_i = blockIdx.x;
if (threadIdx.x == 0) {
is_last_block = false;
square = denominator_vals[count_i][0] * denominator_vals[count_i][0];
}
__syncthreads();
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? losses[count_i][offset] *
numerator_vals[count_i][offset] / square : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
DeviceAtomicAdd(denominator_losses[count_i], -shared_sum[0]);
}
}
}
void DivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals,
const vector<const dtype*> &numerator_vals,
int count,
int dim,
vector<dtype*> &numerator_losses,
vector<dtype*> &denominator_losses) {
NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr,
denominator_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size());
numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size());
numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size());
denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelDivNumeratorBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, denominator_val_arr.value,
count,
dim,
numerator_loss_arr.value);
CheckCudaError();
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
hipLaunchKernelGGL(( KernelDivDenominatorBackward), dim3(block_dim) , dim3(thread_count), 0, 0, loss_arr.value,
numerator_val_arr.value, denominator_val_arr.value, count, dim, block_sums.value,
block_counters.value, denominator_loss_arr.value);
CheckCudaError();
}
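// Forward pass of Split: copies a dim-sized slice of each input vector, starting at the
// per-instance offset, into the corresponding result vector.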
__global__ void KernelSplitForward(const dtype *const *inputs, const int *offsets, int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int offset = offsets[count_i];
results[count_i][dim_i] = inputs[count_i][offset + dim_i];
}
}
void SplitForward(const vector<const dtype*> &inputs, const vector<int> &offsets, int count,
int dim,
vector<dtype*> &results) {
NumberPointerArray input_arr, result_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
result_arr.init((dtype**)results.data(), results.size());
IntArray offset_arr;
offset_arr.init((int*)offsets.data(), offsets.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelSplitForward), dim3(block_count), dim3(TPB), 0, 0, input_arr.value, offset_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelSplitBackward(const dtype *const *losses, const int *offsets, int count,
int dim,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int offset = offsets[count_i];
DeviceAtomicAdd(input_losses[count_i] + offset + dim_i, losses[count_i][dim_i]);
}
}
void SplitBackward(const vector<const dtype*> &losses, const vector<int> &offsets, int count,
int dim,
const vector<dtype*> &input_losses) {
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
IntArray offset_arr;
offset_arr.init((int*)offsets.data(), offsets.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelSplitBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, offset_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
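// Elementwise subtraction: results = minuend - subtrahend, one thread per (instance, dim) entry.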
__global__ void KernelSubForward(const dtype *const *minuend, const dtype *const *subtrahend,
int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = minuend[count_i][dim_i] - subtrahend[count_i][dim_i];
}
}
void SubForward(const std::vector<const dtype*> &minuend,
const std::vector<const dtype*> &subtrahend,
int count,
int dim,
std::vector<dtype*> &results) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray minuend_arr, subtrahend_arr, result_arr;
minuend_arr.init((dtype**)minuend.data(), count);
subtrahend_arr.init((dtype**)subtrahend.data(), count);
result_arr.init((dtype**)results.data(), count);
hipLaunchKernelGGL(( KernelSubForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype* const*)minuend_arr.value,
(const dtype *const *)subtrahend_arr.value, count, dim, result_arr.value);
CheckCudaError();
}
__global__ void KernelSubBackward(const dtype *const *losses, int count, int dim,
dtype *const *minuend_losses,
dtype *const *subtrahend_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(minuend_losses[count_i] + dim_i, losses[count_i][dim_i]);
DeviceAtomicAdd(subtrahend_losses[count_i] + dim_i, -losses[count_i][dim_i]);
}
}
void SubBackward(const std::vector<const dtype*> &losses, int count, int dim,
std::vector<dtype*> &minuend_losses,
std::vector<dtype*> &subtrahend_losses) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray loss_arr, minuend_loss_arr, subtrahend_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
minuend_loss_arr.init((dtype**)minuend_losses.data(), minuend_losses.size());
subtrahend_loss_arr.init((dtype**)subtrahend_losses.data(), subtrahend_losses.size());
hipLaunchKernelGGL(( KernelSubBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype *const *)loss_arr.value, count, dim,
(dtype *const *)minuend_loss_arr.value, (dtype *const *)subtrahend_loss_arr.value);
CheckCudaError();
}
__global__ void KernelPMultiBackward(const dtype **losses,
const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype** in_losses1,
dtype** in_losses2) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(in_losses1[count_i] + dim_i,
losses[count_i][dim_i] * in_vals2[count_i][dim_i]);
DeviceAtomicAdd(in_losses2[count_i] + dim_i,
losses[count_i][dim_i] * in_vals1[count_i][dim_i]);
}
}
void PMultiBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals1,
const std::vector<dtype*> &in_vals2,
int count,
int dim,
std::vector<dtype*> &in_losses1,
std::vector<dtype*> &in_losses2) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr,
in_losses2_arr;
losses_arr.init((dtype**)losses.data(), losses.size());
in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size());
in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size());
in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size());
in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size());
hipLaunchKernelGGL(( KernelPMultiBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)losses_arr.value,
(const dtype**)in_vals1_arr.value,
(const dtype**)in_vals2_arr.value, count, dim, in_losses1_arr.value, in_losses2_arr.value);
CheckCudaError();
}
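// Pointwise addition of in_count input vectors per instance: vals[i][d] = sum over j of ins[j][i][d].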
__global__ void KernelPAddForward(const dtype*** ins, int count, int dim,
int in_count,
dtype **vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i+= step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype sum = ins[0][count_i][dim_i];
for (int j = 1; j < in_count; ++j) {
sum += ins[j][count_i][dim_i];
}
vals[count_i][dim_i] = sum;
}
}
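// Per-instance dot product. One block handles one instance: the elementwise products are
// reduced in dynamically sized shared memory and the scalar result is written to vals[i][0].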
__global__ void KernelPDotForward(const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype** vals) {
    extern __shared__ volatile dtype shared_val[];
if (threadIdx.x < dim) {
shared_val[threadIdx.x] = in_vals1[blockIdx.x][threadIdx.x] *
in_vals2[blockIdx.x][threadIdx.x];
} else {
shared_val[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_val[threadIdx.x] += shared_val[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.x][0] = shared_val[0];
}
}
void PDotForward(const std::vector<dtype*> &ins1,
const std::vector<dtype*> &ins2,
int count,
int dim,
std::vector<dtype*> &vals) {
NumberPointerArray in1_arr, in2_arr, val_arr;
in1_arr.init((dtype**)ins1.data(), ins1.size());
in2_arr.init((dtype**)ins2.data(), ins2.size());
val_arr.init((dtype**)vals.data(), vals.size());
int thread_count = NextTwoIntegerPowerNumber(dim);
hipLaunchKernelGGL(( KernelPDotForward), dim3(count), dim3(thread_count), thread_count * sizeof(dtype), 0, (
const dtype**)in1_arr.value, (const dtype**)in2_arr.value,
count, dim, val_arr.value);
CheckCudaError();
}
__global__ void KernelPDotBackward(const dtype **losses,
const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype **in_losses1,
dtype **in_losses2) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(in_losses1[count_i] + dim_i,
losses[count_i][0] * in_vals2[count_i][dim_i]);
DeviceAtomicAdd(in_losses2[count_i] + dim_i,
losses[count_i][0] * in_vals1[count_i][dim_i]);
}
}
void PDotBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals1,
const std::vector<dtype*> &in_vals2,
int count,
int dim,
std::vector<dtype*> &in_losses1,
std::vector<dtype*> &in_losses2) {
NumberPointerArray in1_loss_arr, in2_loss_arr, loss_arr, in_val1_arr,
in_val2_arr;
in1_loss_arr.init((dtype**)in_losses1.data(), in_losses1.size());
in2_loss_arr.init((dtype**)in_losses2.data(), in_losses2.size());
loss_arr.init((dtype**)losses.data(), losses.size());
in_val1_arr.init((dtype**)in_vals1.data(), in_vals1.size());
in_val2_arr.init((dtype**)in_vals2.data(), in_vals2.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelPDotBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value,
(const dtype**)in_val1_arr.value, (const dtype**)in_val2_arr.value,
count, dim, in1_loss_arr.value, in2_loss_arr.value);
CheckCudaError();
}
void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count,
int dim,
int in_count,
std::vector<dtype*> &vals) {
std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr;
gpu_addr.reserve(ins.size());
for (const std::vector<dtype*> &x : ins) {
std::shared_ptr<NumberPointerArray> arr =
std::make_shared<NumberPointerArray>();
arr->init((dtype**)x.data(), x.size());
gpu_addr.push_back(arr);
}
std::vector<dtype**> ins_gpu;
ins_gpu.reserve(ins.size());
for (auto &ptr : gpu_addr) {
ins_gpu.push_back(ptr->value);
}
NumberPointerPointerArray in_arr;
in_arr.init(ins_gpu.data(), ins_gpu.size());
NumberPointerArray out_arr;
out_arr.init(vals.data(), vals.size());
int block_count = DefaultBlockCount(count * dim);
hipLaunchKernelGGL(( KernelPAddForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype***)in_arr.value,
count, dim, in_count, out_arr.value);
CheckCudaError();
}
__global__ void KernelPAddBackward(const dtype **losses, int count, int dim,
int in_count,
dtype ***in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int dim_mul_count = dim * count;
for (int i = index; i < dim_mul_count * in_count; i += step) {
int in_count_i = i / dim_mul_count;
int dim_mul_count_i = i % dim_mul_count;
int count_i = dim_mul_count_i / dim;
int dim_i = dim_mul_count_i % dim;
DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]);
}
}
void PAddBackward(const std::vector<dtype*> &losses, int count, int dim,
int in_count,
std::vector<std::vector<dtype*>> &in_losses) {
std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr;
gpu_addr.reserve(in_losses.size());
for (const std::vector<dtype*> &x : in_losses) {
std::shared_ptr<NumberPointerArray> arr =
std::make_shared<NumberPointerArray>();
arr->init((dtype**)x.data(), x.size());
gpu_addr.push_back(arr);
}
std::vector<dtype**> in_losses_gpu;
in_losses_gpu.reserve(in_losses.size());
for (auto &ptr : gpu_addr) {
in_losses_gpu.push_back(ptr->value);
}
NumberPointerPointerArray in_loss_arr;
in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size());
NumberPointerArray out_loss_arr;
out_loss_arr.init((dtype**)losses.data(), losses.size());
int block_count = DefaultBlockCount(in_count * count * dim);
hipLaunchKernelGGL(( KernelPAddBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)out_loss_arr.value,
count, dim, in_count, in_loss_arr.value);
CheckCudaError();
}
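// Fused softmax + cross-entropy loss, one block per instance. The block first finds the argmax
// (also used to bump correct_count), then computes numerically stable softmax scores and writes
// (softmax - one_hot(answer)) / batchsize into losses. Requires dim <= TPB.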
__global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses,
int *correct_count, int *answers, int batchsize, int count, int dim) {
volatile __shared__ int opt_label;
volatile __shared__ dtype shared_val[TPB];
volatile __shared__ int64_t max_indexes[TPB];
volatile __shared__ dtype scores_sum[TPB];
volatile __shared__ dtype scores[TPB];
int dim_i = threadIdx.x;
int count_i = blockIdx.x;
if (count_i == 0 && dim_i == 0) {
*correct_count = 0;
}
shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -INFINITY;
max_indexes[dim_i] = dim_i;
__syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i && shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) {
            shared_val[threadIdx.x] = shared_val[threadIdx.x + i];
            max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i];
        }
        __syncthreads();
    }
if (threadIdx.x == 0) {
opt_label = max_indexes[0];
if (answers[count_i] == opt_label) {
atomicAdd(correct_count, 1);
}
}
__syncthreads();
dtype max_score = vals[count_i][opt_label];
dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) :
0.0f;
scores[dim_i] = score;
    scores_sum[dim_i] = score;
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            scores_sum[threadIdx.x] += scores_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
if (dim_i < dim) {
losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] -
(dim_i == answers[count_i] ? 1 : 0)) / batchsize;
}
}
void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses,
int *correct_count,
const std::vector<int> &answers,
int batchsize,
int count,
int dim) {
if (dim > TPB) {
abort();
}
int thread_count = NextTwoIntegerPowerNumber(dim);
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray answer_arr;
answer_arr.init((int*)answers.data(), answers.size());
hipLaunchKernelGGL(( KernelSoftMaxLoss), dim3(count), dim3(thread_count), 0, 0,
const_cast<const dtype **>(val_arr.value),
const_cast<dtype **>(loss_arr.value),
correct_count,
answer_arr.value,
batchsize,
count,
dim);
CheckCudaError();
}
__global__ void Predict(const dtype *val, int dim, int *result) {
__shared__ volatile dtype shared_vals[TPB];
    __shared__ volatile int shared_indexes[TPB];
shared_indexes[threadIdx.x] = threadIdx.x;
if (threadIdx.x < dim) {
shared_vals[threadIdx.x] = val[threadIdx.x];
} else {
shared_vals[threadIdx.x] = -10000000.0f;
}
__syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i && shared_vals[threadIdx.x] < shared_vals[threadIdx.x + i]) {
            shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i];
            shared_indexes[threadIdx.x] = shared_indexes[threadIdx.x + i];
        }
        __syncthreads();
    }
if (threadIdx.x == 0) {
*result = shared_indexes[0];
}
}
int Predict(const dtype* val, int dim) {
if (dim > TPB) {
abort();
}
int thread_count = NextTwoIntegerPowerNumber(dim);
DeviceInt result;
result.init();
hipLaunchKernelGGL(( Predict), dim3(1), dim3(thread_count), 0, 0, val, dim, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
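// Row-wise max and argmax over dim entries per instance. The grid is (count, block_y_count):
// each y-block reduces its slice into block_maxes / block_max_is, and the last block per
// instance (detected through block_counters) reduces the partials into max_vals and max_indexes.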
__global__ void KernelMax(const dtype *const *v, int count, int dim, dtype *block_maxes,
int *block_max_is,
int *block_counters,
int *max_indexes,
dtype *max_vals) {
__shared__ volatile dtype shared_max[TPB];
__shared__ volatile dtype shared_max_i[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY;
shared_max_i[threadIdx.x] = offset;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_maxes[block_maxes_offset] = shared_max[0];
block_max_is[block_maxes_offset] = shared_max_i[0];
//if (shared_max_i[0] >= dim) {
// KernelPrintLine("dim:%d shared_max_i[0]:%d shared_max[0]:%f", dim, shared_max_i[0],
// shared_max[0]);
//}
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype max = -INFINITY;
int max_i = 100000;
//if (threadIdx.x == 0) {
// for (int i = 0; i < gridDim.y; ++i) {
// int offset = blockIdx.x * gridDim.y + i;
// KernelPrintLine("i:%d block_maxes[%d]:%f", i, offset, block_maxes[offset]);
// }
//}
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
if (block_maxes[offset] > max) {
max = block_maxes[offset];
max_i = block_max_is[offset];
//if (max_i >= dim) {
// KernelPrintLine("max_i:%d blockIdx.x:%d gridDim.y:%d i:%d offset:%d",
// max_i, blockIdx.x, gridDim.y, i, offset);
//}
}
}
shared_max[threadIdx.x] = max;
shared_max_i[threadIdx.x] = max_i;
//if (max_i >= dim) {
// KernelPrintLine("count_i:%d dim:%d max_i:%d", count_i, dim, max_i);
//}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
//if (shared_max_i[threadIdx.x] >= dim) {
// KernelPrintLine("index:%d v:%f" shared_max_i[threadIdx.x],
// shared_max[threadIdx.x]);
//}
}
__syncthreads();
}
if (threadIdx.x == 0) {
max_vals[count_i] = shared_max[0];
max_indexes[count_i] = shared_max_i[0];
}
}
}
__global__ void KernelSingleMax(const dtype *const *v, int count, int dim,
int *max_indexes,
dtype *max_vals) {
for (int count_i = 0; count_i < count; ++count_i) {
dtype max_val = -INFINITY;
int max_i;
for (int dim_i = 0; dim_i < dim; ++ dim_i) {
if (v[count_i][dim_i] > max_val) {
max_val = v[count_i][dim_i];
max_i = dim_i;
}
}
max_indexes[count_i] = max_i;
max_vals[count_i] = max_val;
}
}
void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_maxes;
block_maxes.init(block_y_count * count);
IntArray block_max_is, block_counters;
block_max_is.init(block_y_count * count);
block_counters.init(count);
hipLaunchKernelGGL(( KernelMax), dim3(block_dim), dim3(thread_count), 0, 0, v, count, dim, block_maxes.value, block_max_is.value,
block_counters.value, max_indexes, max_vals);
cudaPrintfDisplay(stdout, true);
CheckCudaError();
#if TEST_CUDA
NumberArray max_val_arr;
IntArray max_indexer_arr;
max_val_arr.init(count);
max_indexer_arr.init(count);
hipLaunchKernelGGL(( KernelSingleMax), dim3(1), dim3(1), 0, 0, v, count, dim, max_indexer_arr.value, max_val_arr.value);
CheckCudaError();
vector<int> max_indexer_target(count), max_indexer_gold(count);
MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), hipMemcpyDeviceToHost);
MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int),
hipMemcpyDeviceToHost);
for (int i = 0; i < count; ++i) {
if (max_indexer_target.at(i) != max_indexer_gold.at(i)) {
cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i)
% max_indexer_gold.at(i) << endl;
PrintNums(v, i, dim);
abort();
}
}
#endif
CheckCudaError();
}
__global__ void KernelExp(const dtype *const *in, int count, int dim, const dtype *number_to_sub,
dtype *const *out) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
out[count_i][dim_i] = cuda_exp(in[count_i][dim_i] - number_to_sub[count_i]);
}
}
void Exp(const dtype *const *in, int count, int dim, const dtype *number_to_sub,
dtype *const *out) {
int block_count = DefaultBlockCount(dim * count);
hipLaunchKernelGGL(( KernelExp), dim3(block_count), dim3(TPB), 0, 0, in, count, dim, number_to_sub, out);
CheckCudaError();
}
__global__ void KernelExpForward(const dtype* const *inputs, int count, int dim,
dtype *const *results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = cuda_exp(inputs[count_i][dim_i]);
}
}
void ExpForward(const vector<const dtype*> &inputs, int count, int dim, vector<dtype*> &results) {
NumberPointerArray input_arr, result_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
result_arr.init((dtype**)results.data(), results.size());
int block_count = DefaultBlockCount(dim * count);
hipLaunchKernelGGL(( KernelExpForward), dim3(block_count), dim3(TPB), 0, 0, input_arr.value, count, dim, result_arr.value);
CheckCudaError();
}
__global__ void KernelExpBackward(const dtype* const *losses, const dtype* const *vals,
int count,
int dim,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(input_losses[count_i], losses[count_i][dim_i] * vals[count_i][dim_i]);
}
}
void ExpBackward(const vector<const dtype*> &losses, const vector<const dtype*> &vals, int count,
int dim,
        vector<dtype*> &input_losses) {
NumberPointerArray loss_arr, val_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
int block_count = DefaultBlockCount(dim * count);
hipLaunchKernelGGL(( KernelExpBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, val_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
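// Row-wise sum per instance, using the same two-stage block reduction scheme as KernelMax.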
__global__ void KernelSum(const dtype *const *v, int count, int dim, dtype *block_sums,
int *block_counters,
dtype *sum_vals) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
sum_vals[count_i] = shared_sum[0];
}
}
}
void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
hipLaunchKernelGGL(( KernelSum), dim3(block_dim), dim3(thread_count), 0, 0, v, count, dim, block_sums.value, block_counters.value,
sum_vals);
CheckCudaError();
}
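// Given precomputed exp(v - max), per-instance sums and maxes, writes the softmax gradients
// into grads and the negative log-likelihood of the gold answer into losses, both scaled by
// 1 / batchsize.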
__global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim,
const dtype *const *vals,
const dtype *sums,
const dtype *max_vals,
const int *answers,
dtype reverse_batchsize,
dtype **grads,
dtype *losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype loss = exps[count_i][dim_i] / sums[count_i];
if (dim_i == answers[count_i]) {
loss -= 1.0f;
}
grads[count_i][dim_i] = loss * reverse_batchsize;
losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i])
* reverse_batchsize;
}
}
void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals,
const dtype *sums,
const dtype *max_vals,
const int *answers,
dtype reverse_batchsize,
dtype **grads,
dtype *losses) {
int block_count = DefaultBlockCount(dim * count);
hipLaunchKernelGGL(( KernelSoftMaxLossByExp), dim3(block_count), dim3(TPB), 0, 0, exps, count, dim, vals, sums, max_vals, answers,
reverse_batchsize, grads, losses);
CheckCudaError();
}
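// Host-side softmax loss pipeline: Max -> Exp -> Sum -> SoftMaxLossByExp. The losses buffers are
// reused to hold the intermediate exponentials and then the final gradients; the argmax indexes
// and per-instance losses are copied back to the host for the returned (loss sum, predictions) pair.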
std::pair<dtype, std::vector<int>> SoftMaxLoss(const std::vector<const dtype *> &vals_vector,
int count,
int dim,
const std::vector<int> &gold_answers,
int batchsize,
const std::vector<dtype *> &losses_vector) {
IntArray answer_arr, gold_answer_arr;
answer_arr.init(count);
gold_answer_arr.init((int*)gold_answers.data(), count);
NumberArray max_vals, sum_vals;
max_vals.init(count);
sum_vals.init(count);
NumberPointerArray vals, losses;
vals.init((dtype**)vals_vector.data(), count);
losses.init((dtype**)losses_vector.data(), count);
Max(vals.value, count, dim, answer_arr.value, max_vals.value);
Exp(vals.value, count, dim, max_vals.value, losses.value);
Sum(losses.value, count, dim, sum_vals.value);
NumberArray loss_arr;
loss_arr.init(count);
SoftMaxLossByExp(losses.value, count, dim, vals.value, sum_vals.value, max_vals.value,
gold_answer_arr.value, 1.0 / batchsize, losses.value, loss_arr.value);
vector<int> answers(count);
MyCudaMemcpy(answers.data(), answer_arr.value, count * sizeof(int), hipMemcpyDeviceToHost);
for (int word_id : answers) {
if (word_id < 0) {
for (int id : answers) {
cerr << id << " ";
}
cerr << endl;
abort();
}
}
vector<dtype> loss_vector(count);
MyCudaMemcpy(loss_vector.data(), loss_arr.value, count * sizeof(dtype), hipMemcpyDeviceToHost);
dtype loss_sum = accumulate(loss_vector.begin(), loss_vector.end(), 0.0f);
return std::make_pair(loss_sum, answers);
}
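// Same two-stage max reduction as KernelMax, but the maximum is written into a per-instance
// scalar tensor (max_vals[i][0]) and the argmax is recorded for the backward pass.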
__global__ void KernelMaxScalarForward(const dtype *const *v, int count, int dim,
dtype *block_maxes,
int *block_max_is,
int *block_counters,
int *max_indexes,
dtype **max_vals) {
__shared__ volatile dtype shared_max[TPB];
__shared__ volatile dtype shared_max_i[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY;
shared_max_i[threadIdx.x] = offset;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_maxes[block_maxes_offset] = shared_max[0];
block_max_is[block_maxes_offset] = shared_max_i[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype max = -INFINITY;
int max_i = 100000;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
if (block_maxes[offset] > max) {
max = block_maxes[offset];
max_i = block_max_is[offset];
}
}
shared_max[threadIdx.x] = max;
shared_max_i[threadIdx.x] = max_i;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
max_vals[count_i][0] = shared_max[0];
max_indexes[count_i] = shared_max_i[0];
}
}
}
void MaxScalarForward(const vector<const dtype*> &inputs, int count, int dim,
vector<dtype*> &results,
vector<int> &max_indexes) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_maxes;
block_maxes.init(block_y_count * count);
IntArray block_max_is, block_counters;
block_max_is.init(block_y_count * count);
block_counters.init(count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
result_arr.init((dtype**)results.data(), results.size());
IntArray max_index_arr;
max_index_arr.init(max_indexes.size());
hipLaunchKernelGGL(( KernelMaxScalarForward), dim3(block_dim), dim3(thread_count), 0, 0, input_arr.value, count, dim,
block_maxes.value, block_max_is.value, block_counters.value, max_index_arr.value,
result_arr.value);
CheckCudaError();
MyCudaMemcpy(max_indexes.data(), max_index_arr.value, count * sizeof(int),
hipMemcpyDeviceToHost);
}
__global__ void KernelMaxScalarBackward(const dtype *const *losses, const int *indexes, int count,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count; i += step) {
DeviceAtomicAdd(input_losses[i] + indexes[i], losses[i][0]);
}
}
void MaxScalarBackward(const vector<const dtype *> &losses, const vector<int> &indexes, int count,
const vector<dtype*> &input_losses) {
int block_count = DefaultBlockCount(count);
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
IntArray index_arr;
index_arr.init((int*)indexes.data(), indexes.size());
hipLaunchKernelGGL(( KernelMaxScalarBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, index_arr.value, count,
input_loss_arr.value);
    CheckCudaError();
}
__global__ void KernelVectorSumForward(const dtype *const *v, int count, int dim,
dtype *block_sums,
int *block_counters,
dtype **results) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
results[count_i][0] = shared_sum[0];
}
}
}
void VectorSumForward(const vector<const dtype *> &inputs, int count, int dim,
vector<dtype*> &results) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
result_arr.init((dtype**)results.data(), results.size());
hipLaunchKernelGGL(( KernelVectorSumForward), dim3(block_dim), dim3(thread_count), 0, 0, input_arr.value, count, dim,
block_sums.value, block_counters.value, result_arr.value);
CheckCudaError();
}
__global__ void KernelVectorSumBackward(const dtype *const *losses, int count, int dim,
dtype * *const input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(input_losses[count_i] + dim_i, losses[count_i][0]);
}
}
void VectorSumBackward(const vector<const dtype*> &losses, int count, int dim,
vector<dtype*> &input_losses) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
hipLaunchKernelGGL(( KernelVectorSumBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
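// Broadcasts the per-instance scalar inputs[i][0] to every one of the dim result entries.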
__global__ void KernelScalarToVectorForward(const dtype* const* inputs, int count, int dim,
dtype *const *results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = inputs[count_i][0];
}
}
void ScalarToVectorForward(const vector<const dtype*> &inputs, int count, int dim,
vector<dtype*> &results) {
int block_count = DefaultBlockCount(dim * count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
    result_arr.init((dtype**)results.data(), results.size());
hipLaunchKernelGGL(( KernelScalarToVectorForward), dim3(block_count), dim3(TPB), 0, 0, input_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelScalarToVectorBackward(const dtype *const *losses, int count, int dim,
dtype *block_sums,
int *block_counters,
dtype *const *input_losses) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? losses[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
DeviceAtomicAdd(input_losses[count_i], shared_sum[0]);
}
}
}
void ScalarToVectorBackward(const vector<const dtype*> &losses, int count, int dim,
vector<dtype*> &input_losses) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
NumberPointerArray input_loss_arr;
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
hipLaunchKernelGGL(( KernelScalarToVectorBackward), dim3(block_dim), dim3(thread_count), 0, 0, loss_arr.value, count, dim,
block_sums.value, block_counters.value, input_loss_arr.value);
CheckCudaError();
}
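// Sum of squares over a flat array: each block accumulates a strided partial sum in shared
// memory, publishes it to global_sum, and the last block to finish (via block_counter) reduces
// the per-block partials into the final scalar result.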
__global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum,
int *block_counter, dtype *result) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
int index = DeviceDefaultIndex();
if (index == 0) {
*block_counter = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
shared_sum[threadIdx.x] = 0.0f;
for (int i = index; i < len; i += blockDim.x * gridDim.x) {
shared_sum[threadIdx.x] += v[i] * v[i];
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = shared_sum[0];
if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sum += global_sum[i];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = shared_sum[0];
}
}
}
dtype SquareSum(const dtype *v, int len) {
int block_count = DefaultBlockCount(len);
NumberArray global_sum;
global_sum.init(block_count);
DeviceInt block_counter;
block_counter.init();
DeviceNumber result;
result.init();
hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, len,
global_sum.value, block_counter.value, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
__global__ void KernelSquareSum(const dtype *v, const bool *indexers,
int count,
int dim,
dtype *global_sum,
int *block_counter,
dtype *result) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
int index = DeviceDefaultIndex();
if (index == 0) {
*block_counter = 0;
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = 0.0f;
is_last_block = false;
}
int count_i = index / dim;
if (index < count * dim && indexers[count_i]) {
shared_sum[threadIdx.x] = v[index] * v[index];
} else {
shared_sum[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = shared_sum[0];
if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
float sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sum += global_sum[i];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = shared_sum[0];
}
}
}
dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) {
int block_count = DefaultBlockCountWithoutLimit(count * dim);
NumberArray global_sum;
global_sum.init(block_count);
DeviceInt block_counter;
block_counter.init();
DeviceNumber result;
result.init();
hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, indexers,
count, dim, global_sum.value, block_counter.value, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
__global__ void KernelRescale(dtype *v, int len, dtype scale) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i += step) {
v[i] *= scale;
}
}
void Rescale(dtype *v, int len, dtype scale) {
int block_count = DefaultBlockCount(len);
hipLaunchKernelGGL(( KernelRescale), dim3(block_count), dim3(TPB), 0, 0, v, len, scale);
CheckCudaError();
}
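// Adam update for a dense row x col parameter. L2 regularization is skipped for bias parameters,
// and the beta1 bias-correction factor x is precomputed on the host in UpdateAdam.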
__global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias,
dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps,
dtype x) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
if (!is_bias) {
grad[i] += val[i] * reg;
}
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x;
dtype square_plus_eps = aux_square[i] + eps;
val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps);
}
}
void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
dtype x = 1.0f / (1 - pow(belta1, iter + 1));
hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, is_bias, aux_mean,
aux_square,
iter,
belta1,
belta2,
alpha,
reg,
eps,
x);
CheckCudaError();
}
__global__ void KernelUpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias,
dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps,
dtype x) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x;
dtype square_plus_eps = aux_square[i] + eps;
val[i] = (1 - (is_bias? 0.0f : reg)) * val[i] - aux_mean[i] * lr_t /
cuda_sqrt(square_plus_eps);
}
}
void UpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
dtype x = 1.0f / (1 - pow(belta1, iter + 1));
hipLaunchKernelGGL(( KernelUpdateAdamW), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, is_bias, aux_mean,
aux_square,
iter,
belta1,
belta2,
alpha,
reg,
eps,
x);
CheckCudaError();
}
__global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col,
dtype *aux_mean,
dtype *aux_square,
const bool *indexers,
int *iters,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
int count_i = i / row;
if (indexers[count_i]) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2,
iters[count_i] + 1)) / (1 - cuda_pow(belta1,
iters[count_i] + 1));
dtype square_plus_eps = aux_square[i] + eps;
val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps);
}
}
}
__global__ void KernelSelfPlusIters(const bool *indexers, int *iters,
int count) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count; i += step) {
if (indexers[i]) {
++iters[i];
}
}
}
void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean,
dtype *aux_square,
const bool *indexers,
int *iters,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_mean,
aux_square, indexers, iters, belta1, belta2, alpha, reg, eps);
CheckCudaError();
block_count = DefaultBlockCount(col);
hipLaunchKernelGGL(( KernelSelfPlusIters), dim3(block_count), dim3(TPB), 0, 0, indexers, iters, col);
CheckCudaError();
}
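// AdaGrad update for a dense parameter; the L2 term is applied only when the parameter is a
// genuine matrix (row > 1 and col > 1).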
__global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_square[i] = aux_square[i] + grad[i] * grad[i];
val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps);
}
}
void UpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square,
alpha, reg, eps);
CheckCudaError();
}
__global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
const bool *indexers,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
int count_i = i / col;
if (indexers[count_i]) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_square[i] = aux_square[i] + grad[i] * grad[i];
val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps);
}
}
}
void UpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
const bool *indexers,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square,
indexers, alpha, reg, eps);
CheckCudaError();
}
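// Allocates a fixed block (roughly 10 MB) of pinned, write-combined host memory; write-combined
// pages are optimized for host-to-device transfers.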
void *GraphHostAlloc() {
void *m;
CallCuda(hipHostMalloc(&m, 10000000, hipHostMallocWriteCombined));
if (m == NULL) {
abort();
}
return m;
}
}
| 573523b6927477d63e2d17e22a6641fc91e0c2e9.cu | #include "N3LDG_cuda.h"
#include <array>
#include <boost/format.hpp>
#include <cstdlib>
#include <cstddef>
#include <vector>
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cublas_v2.h>
#include "Printf_cuda.cuh"
#include "Printf_cuda.cu"
#include "Memory_cuda.h"
#include <curand.h>
#include <curand_kernel.h>
#include "cnmem.h"
#include <string>
#include <utility>
#include <cstring>
#include <cstdint>
#include <chrono>
#include <thread>
#include <numeric>
#include <memory>
#include "profiler.h"
#include "Memory_cuda.h"
#include "MyTensor-def.h"
namespace n3ldg_cuda {
using namespace std;
using boost::format;
#if USE_FLOAT
#define cuda_sqrt(x) sqrtf(x)
#define cuda_pow(x, y) powf(x, y)
#define cuda_tanh(x) tanhf(x)
#define cuda_exp(x) __expf(x)
#define cuda_log(x) logf(x)
#else
#define cuda_sqrt(x) sqrt(x)
#define cuda_pow(x, y) pow(x, y)
#define cuda_tanh(x) tanh(x)
#define cuda_exp(x) exp(x)
#define cuda_log(x) log(x)
#endif
#define KERNEL_LOG
#ifdef KERNEL_LOG
#define KernelPrintLine(format, ...)\
{\
cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\
blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\
}
#else
#define KernelPrintLine(format, ...)
#endif
constexpr int TPB = 1024;
constexpr int BLOCK_COUNT = 56;
void CallCuda(cudaError_t status) {
if (status != cudaSuccess) {
cerr << "cuda error:" << cudaGetErrorString(status) << endl;
abort();
}
}
void CheckCudaError() {
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::cerr << "cuda error:" << cudaGetErrorName(error) << std::endl;
std::cerr << "cuda error:" << cudaGetErrorString(error) << std::endl;
abort();
}
}
void CallCnmem(cnmemStatus_t status) {
assert(status == CNMEM_STATUS_SUCCESS);
}
void CallCublas(cublasStatus_t status) {
assert(status == CUBLAS_STATUS_SUCCESS);
}
void CallCurand(curandStatus status) {
assert(status == CURAND_STATUS_SUCCESS);
}
cublasHandle_t& GetCublasHandle() {
static cublasHandle_t handle;
static bool init;
if (!init) {
init = true;
CallCublas(cublasCreate(&handle));
}
return handle;
}
cudaError_t MyCudaMemcpy(void *dest, const void *src, size_t count,
cudaMemcpyKind kind) {
cudaError_t e;
e = cudaMemcpyAsync(dest, src, count, kind);
CallCuda(e);
return e;
}
void NumberPointerArray::init(dtype **host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*),
cudaMemcpyHostToDevice));
this->len = len;
}
NumberPointerArray::~NumberPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
int NextTwoIntegerPowerNumber(int number) {
int result = 1;
while (number > result) {
result <<= 1;
}
return result;
}
void NumberPointerPointerArray::init(dtype ***host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**)));
    CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype**),
cudaMemcpyHostToDevice));
this->len = len;
}
NumberPointerPointerArray::~NumberPointerPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void NumberArray::init(int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype)));
this->len = len;
}
void NumberArray::init(dtype *host_arr, int len) {
init(len);
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype),
cudaMemcpyHostToDevice));
}
NumberArray::~NumberArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void DeviceInt::init() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int)));
}
void DeviceInt::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(&v, value, sizeof(int), cudaMemcpyDeviceToHost));
}
void DeviceInt::copyFromHostToDevice() {
CallCuda(MyCudaMemcpy(value, &v, sizeof(int), cudaMemcpyHostToDevice));
}
DeviceInt::~DeviceInt() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void DeviceNumber::init() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
    CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(dtype)));
}
void DeviceNumber::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), cudaMemcpyDeviceToHost));
}
DeviceNumber::~DeviceNumber() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void IntPointerArray::init(int **host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*),
cudaMemcpyHostToDevice));
this->len = len;
}
IntPointerArray::~IntPointerArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void IntArray::init(int *host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int),
cudaMemcpyHostToDevice));
this->len = len;
}
void IntArray::init(int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int)));
this->len = len;
}
IntArray::~IntArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void BoolArray::init(bool *host_arr, int len) {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
value = NULL;
}
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool)));
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool),
cudaMemcpyHostToDevice));
this->len = len;
}
void BoolArray::copyFromHost(bool *host_arr) {
CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool),
cudaMemcpyHostToDevice));
}
void BoolArray::copyToHost(bool *host_arr) {
CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool),
cudaMemcpyDeviceToHost));
}
BoolArray::~BoolArray() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor1D::init(int dim) {
initOnDevice(dim);
#if TEST_CUDA
v = new dtype[dim];
zero();
#endif
}
void Tensor1D::initOnMemoryAndDevice(int dim) {
initOnDevice(dim);
v = new dtype[dim];
zero();
}
void Tensor1D::initOnDevice(int dim) {
CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype)));
this->dim = dim;
}
Tensor1D::Tensor1D(const Tensor1D &t) {
    dim = t.dim;
    CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype)));
    if (t.v != NULL) {
        v = new dtype[dim];
        memcpy(v, t.v, dim * sizeof(dtype));
    }
    CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), cudaMemcpyDeviceToDevice));
}
Tensor1D::~Tensor1D() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor1D::print() const {
cout << "dim:" << dim << endl;
PrintNums(value, dim);
}
void Tensor1D::copyFromHostToDevice() {
assert(v != NULL);
assert(value != NULL);
CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), cudaMemcpyHostToDevice));
}
void Tensor1D::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), cudaMemcpyDeviceToHost));
}
void Tensor2D::initOnMemoryAndDevice(int row, int col) {
initOnDevice(row, col);
v = new dtype[row * col];
zero();
}
void Tensor2D::init(int row, int col) {
initOnDevice(row, col);
#if TEST_CUDA
v = new dtype[row * col];
zero();
#endif
}
void Tensor2D::initOnDevice(int row, int col) {
CallCuda(MemoryPool::Ins().Malloc((void**)&value,
row * col * sizeof(dtype)));
this->row = row;
this->col = col;
this->size = row * col;
}
Tensor2D::Tensor2D(const Tensor2D &t) {
    row = t.row;
    col = t.col;
    size = t.size;
    CallCuda(MemoryPool::Ins().Malloc((void**)&value, size * sizeof(dtype)));
    if (t.v != NULL) {
        v = new dtype[size];
        memcpy(v, t.v, size * sizeof(dtype));
    }
    CallCuda(MyCudaMemcpy(value, t.value, size * sizeof(dtype),
                cudaMemcpyDeviceToDevice));
}
Tensor2D::~Tensor2D() {
if (value != NULL) {
CallCuda(MemoryPool::Ins().Free(value));
}
}
void Tensor2D::print() const {
cout << "row:" << row << " col:" << col << endl;
PrintNums(value, size);
}
void Tensor2D::copyFromHostToDevice() {
CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), cudaMemcpyHostToDevice));
}
void Tensor2D::copyFromDeviceToHost() {
CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), cudaMemcpyDeviceToHost));
}
void Assert(bool v, const std::string &message, const function<void(void)> &call) {
#if TEST_CUDA
if (!v) {
std::cerr << message << std::endl;
call();
abort();
}
#endif
}
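// Lock-free accumulation built from atomicExch: the pending contribution is swapped into
// *address, and any value another thread deposited in the meantime is carried into the next
// iteration until an exchange returns zero.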
__device__ void DeviceAtomicAdd(dtype* address, dtype value) {
    dtype old = value;
    dtype new_old;
    do {
        new_old = atomicExch(address, 0.0f);
        new_old += old;
    } while ((old = atomicExch(address, new_old)) != 0.0f);
}
__device__ dtype cuda_dtanh(dtype y) {
return 1.0f - y * y;
}
__device__ dtype cuda_sigmoid(dtype x) {
return 1.0f / (1.0f + cuda_exp(-x));
}
__device__ dtype cuda_dsigmoid(dtype y) {
return y * (1.0f - y);
}
__device__ dtype cuda_relu(dtype x) {
return x > 0.0f ? x : 0.0f;
}
__device__ dtype cuda_drelu(dtype x) {
return x > 0.0f ? 1 : 0.0f;
}
__device__ dtype cuda_leaky_relu(dtype x) {
return x > 0.0f ? x : -0.1f * x;
}
__device__ dtype cuda_dleaky_relu(dtype x) {
return x > 0.0f ? 1.0f : -0.1f;
}
const dtype SELU_LAMBDA = 1.0507009873554804934193349852946;
const dtype SELU_ALPHA = 1.6732632423543772848170429916717;
__device__ dtype cuda_selu(dtype x) {
return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) :
SELU_LAMBDA * x;
}
__device__ dtype cuda_dselu(dtype x, dtype y) {
return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA;
}
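// Fills a device array with uniform random values in [-bound, bound], generated on the host
// with rand() and copied to the device in a single transfer.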
void Random(dtype *v, int len, dtype bound) {
dtype *mem = (dtype*)malloc(len * sizeof(dtype));
assert(mem != NULL);
dtype min = -bound, max = bound;
for (int i = 0; i < len; i++) {
mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min;
}
CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), cudaMemcpyHostToDevice));
free(mem);
}
__device__ int DeviceDefaultIndex() {
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int DeviceDefaultStep() {
return gridDim.x * blockDim.x;
}
__device__ dtype DeviceAbs(dtype d) {
return d > 0 ? d : -d;
}
int DefaultBlockCount(int len) {
int block_count = (len - 1 + TPB) /
TPB;
return std::min(block_count, BLOCK_COUNT);
}
int DefaultBlockCountWithoutLimit(int len) {
return (len - 1 + TPB) / TPB;
}
__global__ void KernelZero(dtype *v, int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len) {
return;
}
v[index] = 0;
}
void Zero(dtype *v, int len) {
int block_count = (len - 1 + TPB) /
TPB;
KernelZero<<<block_count, TPB>>>(v, len);
CheckCudaError();
}
__global__ void PrintPointers(void **p, int len) {
for (int i = 0; i < len; ++i) {
printf("%p\n", p[i]);
}
}
__global__ void KernelPrintNums(const dtype* p, int len) {
for (int i = 0; i < len; ++i) {
printf("%d %f\n", i, p[i]);
}
}
void PrintNums(const dtype* p, int len) {
KernelPrintNums<<<1, 1>>>(p, len);
cudaDeviceSynchronize();
CheckCudaError();
}
__global__ void KernelPrintNums(const dtype *const *p, int index, int len) {
for (int i = 0; i < len; ++i) {
printf("%d %f\n", i, p[index][i]);
}
}
void PrintNums(const dtype *const *p, int count_i, int len) {
KernelPrintNums<<<1, 1>>>(p, count_i, len);
cudaDeviceSynchronize();
CheckCudaError();
}
__global__ void KernelPrintInts(const int* p, int len) {
for (int i = 0; i < len; ++i) {
printf("%d\n", p[i]);
}
}
void PrintInts(const int* p, int len) {
KernelPrintInts<<<1, 1>>>(p, len);
cudaDeviceSynchronize();
CheckCudaError();
}
void InitCuda(int device_id, float memory_in_gb) {
std::cout << "device_id:" << device_id << std::endl;
CallCuda(cudaSetDeviceFlags(cudaDeviceMapHost));
#if DEVICE_MEMORY == 0
cnmemDevice_t device;
device.size = 10000000000;
device.device = device_id;
cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT);
#else
CallCuda(cudaSetDevice(device_id));
#endif
CallCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
CallCuda(cudaPrintfInit());
MemoryPool::Ins().Init(memory_in_gb);
}
void EndCuda() {
cudaPrintfEnd();
Profiler::Ins().Print();
}
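// Scatters a contiguous device buffer of count * len values into count separate device vectors.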
__global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src,
dtype **dest, int count, int len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * len; i += step) {
int count_i = i / len;
int len_i = i % len;
dest[count_i][len_i] = src[i];
}
}
void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals,
int count,
int len) {
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
int block_count = (len * count - 1 + TPB) / TPB;
block_count = std::min(block_count, BLOCK_COUNT);
KernelCopyFromOneVectorToMultiVectors<<<block_count, TPB>>>(src,
val_arr.value, count, len);
CheckCudaError();
}
void CopyFromHostToDevice(const std::vector<dtype*> &src,
std::vector<dtype*> &dest, int count, int dim) {
dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype));
if (long_src == NULL) {
std::cerr << "out of memory!" << std::endl;
abort();
}
for (int i = 0; i < count; ++i) {
memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype));
}
dtype *long_dest = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest,
                count * dim * sizeof(dtype)));
    CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype),
                cudaMemcpyHostToDevice));
CopyFromOneVectorToMultiVals(long_dest, dest, count, dim);
free(long_src);
CallCuda(MemoryPool::Ins().Free(long_dest));
}
__global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count,
int len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * len; i += step) {
int count_i = i / len;
int len_i = i % len;
dest[i] = src[count_i][len_i];
}
}
void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src,
dtype *dest,
int count,
int len) {
NumberPointerArray src_arr;
src_arr.init((dtype**)src.data(), src.size());
int block_count = DefaultBlockCount(len * count);
KernelCopyFromMultiVectorsToOneVector<<<block_count, TPB>>>(
(const dtype**)src_arr.value, dest, count, len);
CheckCudaError();
}
void CopyFromDeviceToHost(const std::vector<dtype*> &src,
std::vector<dtype*> &dest, int count, int dim) {
dtype *long_src = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&long_src,
                count * dim * sizeof(dtype)));
CopyFromMultiVectorsToOneVector(src, long_src, count, dim);
dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype));
if (long_dest == NULL) {
std::cerr << "out of memory!" << std::endl;
abort();
}
CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype),
cudaMemcpyDeviceToHost));
for (int i = 0; i < count; ++i) {
memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype));
}
CallCuda(MemoryPool::Ins().Free(long_src));
free(long_dest);
}
__global__ void KernelActivated(ActivatedEnum activated, const dtype *src,
dtype**dest,
dtype* dest2,
int count,
int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int i = index; i < len * count; i += step) {
int count_i = i / len;
int len_i = i % len;
dtype result;
if (activated == ActivatedEnum::TANH) {
result = cuda_tanh(src[i]);
} else if (activated == ActivatedEnum::SIGMOID) {
result = cuda_sigmoid(src[i]);
} else if (activated == ActivatedEnum::RELU) {
result = cuda_relu(src[i]);
} else if (activated == ActivatedEnum::LEAKY_RELU) {
result = cuda_leaky_relu(src[i]);
} else if (activated == ActivatedEnum::SELU) {
result = cuda_selu(src[i]);
} else {
printf("KernelActivated error\n");
return;
}
dest[count_i][len_i] = result;
dest2[i] = result;
}
}
void Activated(ActivatedEnum activated, const dtype *src,
const std::vector<dtype*>& dest,
dtype *dest2,
int len) {
int count = dest.size();
NumberPointerArray dest_arr;
dest_arr.init((dtype**)dest.data(), dest.size());
int block_count = std::min((len * count - 1 + TPB) / TPB, BLOCK_COUNT);
KernelActivated<<<block_count, TPB>>>(activated, src, dest_arr.value, dest2, count, len);
CheckCudaError();
}
__global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs,
int count,
int dim,
dtype**ys) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (activated == ActivatedEnum::TANH) {
ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]);
} else if (activated == ActivatedEnum::SIGMOID) {
ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]);
} else {
printf("error\n");
}
}
}
void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs,
int count,
int dim,
std::vector<dtype*> &ys) {
NumberPointerArray x_arr, y_arr;
x_arr.init((dtype**)xs.data(), xs.size());
y_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
KernelTanhForward<<<block_count, TPB>>>(activated,
(const dtype**)x_arr.value, count, dim, y_arr.value);
CheckCudaError();
}
__global__ void KernelTanhBackward(ActivatedEnum activated,
const dtype **losses,
const dtype **vals,
int count,
int dim,
dtype** in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
        dtype v = 0.0f;
        if (activated == ActivatedEnum::TANH) {
            v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] *
                    vals[count_i][dim_i]);
        } else if (activated == ActivatedEnum::SIGMOID) {
            v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) *
                vals[count_i][dim_i];
        }
DeviceAtomicAdd(in_losses[count_i] + dim_i, v);
}
}
void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses,
const std::vector<dtype*> &vals,
int count,
int dim,
std::vector<dtype*> &in_losses) {
NumberPointerArray loss_arr, val_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int block_count = DefaultBlockCount(count * dim);
KernelTanhBackward<<<block_count, TPB>>>(activated ,(const dtype**)loss_arr.value,
(const dtype**)val_arr.value, count, dim, in_loss_arr.value);
CheckCudaError();
}
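// Dropout: during training an element is zeroed when its mask value falls
// below drop_factor; at inference the values are instead scaled by
// (1 - drop_factor). The backward pass below mirrors the same two cases.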
__global__ void KernelDropoutForward(const dtype** xs, int count, int dim,
bool is_training,
const dtype* drop_mask,
dtype drop_factor,
dtype**ys) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (is_training) {
if (drop_mask[i] < drop_factor) {
ys[count_i][dim_i] = 0.0f;
} else {
ys[count_i][dim_i] = xs[count_i][dim_i];
}
} else {
ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i];
}
}
}
void DropoutForward(const std::vector<dtype*> &xs, int count, int dim,
bool is_training,
const dtype *drop_mask,
dtype drop_factor,
std::vector<dtype*> &ys) {
if (drop_factor < 0 || drop_factor >= 1.0f) {
std::cerr << "drop value is " << drop_factor << std::endl;
abort();
}
NumberPointerArray x_arr, y_arr;
x_arr.init((dtype**)xs.data(), xs.size());
y_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
KernelDropoutForward<<<block_count, TPB>>>((const dtype**)x_arr.value,
count, dim, is_training, drop_mask, drop_factor, y_arr.value);
CheckCudaError();
}
__global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals,
int count,
int dim,
bool is_training,
const dtype* drop_mask,
dtype drop_factor,
dtype** in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
if (is_training) {
if (drop_mask[i] >= drop_factor) {
DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]);
}
} else {
DeviceAtomicAdd(in_losses[count_i] + dim_i,
(1 - drop_factor) * losses[count_i][dim_i]);
}
}
}
void DropoutBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &vals,
int count,
int dim,
bool is_training,
const dtype *drop_mask,
dtype drop_factor,
std::vector<dtype*> &in_losses) {
if (drop_factor < 0 || drop_factor >= 1) {
std::cerr << "drop value is " << drop_factor << std::endl;
abort();
}
NumberPointerArray loss_arr, val_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int block_count = DefaultBlockCount(count * dim);
KernelDropoutBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value,
(const dtype**)val_arr.value, count, dim, is_training, drop_mask, drop_factor,
in_loss_arr.value);
CheckCudaError();
}
__global__ void KernelBucketForward(const dtype *input, int count, int dim, dtype **ys) {
int index = DeviceDefaultIndex();
for (int i = index; i < count * dim; i+= DeviceDefaultStep()) {
int count_i = i / dim;
int dim_i = i % dim;
ys[count_i][dim_i] = input[count_i * dim + dim_i];
}
}
void BucketForward(const std::vector<dtype> &input, int count, int dim, std::vector<dtype*> &ys) {
NumberArray input_arr;
NumberPointerArray ys_arr;
input_arr.init((dtype*)input.data(), input.size());
ys_arr.init((dtype**)ys.data(), ys.size());
int block_count = DefaultBlockCount(count * dim);
KernelBucketForward<<<block_count, TPB>>>((const dtype*)input_arr.value, count, dim,
ys_arr.value);
CheckCudaError();
}
__global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b,
dtype* xs_dest,
dtype* b_dest,
int count,
int x_len,
int b_len,
bool use_b) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
int x_total_len = count * x_len;
int b_total_len = count * b_len;
for (int i = index; i < x_total_len + b_total_len; i += step) {
if (i < x_total_len) {
int count_i = i / x_len;
int len_i = i % x_len;
xs_dest[i] = xs[count_i][len_i];
} else if (use_b) {
int b_i = i - x_total_len;
int len_i = b_i % b_len;
b_dest[b_i] = b[len_i];
}
}
}
void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b,
dtype* xs_dest,
dtype* b_dest,
int count,
int x_len,
int b_len,
bool use_b) {
NumberPointerArray x_arr;
x_arr.init((dtype**)xs.data(), xs.size());
int len = x_len + b_len;
int block_count = std::min((count * len - 1 + TPB) / TPB, 56);
KernelCopyForUniNodeForward<<<block_count, TPB>>>(
(const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest,
count, x_len, b_len, use_b);
CheckCudaError();
}
__global__ void KernelCopyForBiNodeForward(const dtype **x1s,
const dtype **x2s,
const dtype *b,
dtype *x1s_dest,
dtype *x2s_dest,
dtype *b_dest,
int count,
int x1_len,
int x2_len,
bool use_b,
int b_len) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int x1_total_len = count * x1_len;
int x2_total_len = count * x2_len;
int b_total_len = use_b ? count * b_len : 0;
int total_len = x1_total_len + x2_total_len + b_total_len;
for (int i = index; i < total_len; i += step) {
if (i < x2_total_len) {
int len_i = i % x2_len;
int count_i = i / x2_len;
x2s_dest[i] = x2s[count_i][len_i];
} else if (i >= x2_total_len && i < x1_total_len + x2_total_len) {
int len_i = (i - x2_total_len) % x1_len;
int count_i = (i - x2_total_len) / x1_len;
x1s_dest[i - x2_total_len] = x1s[count_i][len_i];
} else {
int b_i = (i - x1_total_len - x2_total_len);
int len_i = b_i % b_len;
b_dest[b_i] = b[len_i];
}
}
}
void CopyForBiNodeForward(const std::vector<dtype*>& x1s,
const std::vector<dtype *>& x2s,
const dtype *b,
dtype *x1s_dest,
dtype *x2s_dest,
dtype *b_dest,
int count,
int x1_len,
int x2_len,
bool use_b,
int b_len) {
int len = x1_len + x2_len + b_len;
int block_count = DefaultBlockCount(count * len);
NumberPointerArray x1_arr, x2_arr;
x1_arr.init((dtype**)x1s.data(), x1s.size());
x2_arr.init((dtype**)x2s.data(), x2s.size());
KernelCopyForBiNodeForward<<<block_count, TPB>>>(
(const dtype**)x1_arr.value,
(const dtype**)x2_arr.value,
b,
x1s_dest,
x2s_dest,
b_dest,
count,
x1_len,
x2_len,
use_b,
b_len);
CheckCudaError();
}
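// Thin wrapper over cublasSgemm/Dgemm computing y = op(W) * op(x) for a batch
// of count column vectors; when useb is set, beta is 1 so any values already
// stored in y (e.g. a pre-copied bias) are added to the product.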
void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col,
int count, bool useb, bool should_x_transpose,
bool should_W_transpose) {
cublasHandle_t &handle = GetCublasHandle();
dtype alpha = 1;
dtype beta = useb? 1 : 0;
cublasOperation_t x_op = should_x_transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
int ldx = should_x_transpose ? count : col;
cublasOperation_t W_op = should_W_transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
int ldw = should_W_transpose ? col : row;
#if USE_FLOAT
CallCublas(cublasSgemm(handle, W_op, x_op, row, count, col,
&alpha, W, ldw, x, ldx, &beta, y, row));
#else
CallCublas(cublasDgemm(handle, W_op, x_op, row, count, col,
&alpha, W, ldw, x, ldx, &beta, y, row));
#endif
}
__global__ void KernelVerify(dtype *host, dtype *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i += step) {
        dtype loss = host[i] - device[i];
        if (DeviceAbs(loss) > 0.001 && DeviceAbs(loss) > 0.001 * DeviceAbs(host[i])) {
            *success = false;
            KernelPrintLine("KernelVerify: host:%f device:%f loss:%f",
                    host[i],
                    device[i],
                    loss);
}
}
}
bool Verify(dtype *host, dtype *device, int len, const char* message) {
NumberArray arr;
arr.init(host, len);
int block_count = DefaultBlockCount(len);
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
cudaMemcpyHostToDevice));
KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
cudaMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
cudaDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
if (!success) {
cout << message << endl;
}
return success;
}
__global__ void KernelVerify(bool *host, bool *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
if (index < len) {
if (host[index] != device[index]) {
*success = false;
printf("KernelVerify %s: host:%d device:%d \n", message,
host[index],
device[index]);
KernelPrintLine("KernelVerify: host:%d device:%d", host[index],
device[index]);
}
}
}
bool Verify(bool *host, bool *device, int len, const char* message) {
BoolArray arr;
arr.init(host, len);
int block_count = (len + TPB - 1) / TPB;
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
cudaMemcpyHostToDevice));
KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
cudaMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
cudaDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
return success;
}
__global__ void KernelVerify(int *host, int *device, int len,
const char *message, bool *success) {
int index = DeviceDefaultIndex();
if (index < len) {
if (host[index] != device[index]) {
*success = false;
printf("KernelVerify %s: host:%d device:%d \n", message,
host[index],
device[index]);
KernelPrintLine("KernelVerify: host:%d device:%d", host[index],
device[index]);
}
}
}
bool Verify(int *host, int *device, int len, const char* message) {
IntArray arr;
arr.init(host, len);
int block_count = (len + TPB - 1) / TPB;
char *m = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&m,
(strlen(message) + 1) * sizeof(char)));
CallCuda(MyCudaMemcpy(m, message,
(strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice));
bool success = true;
bool *dev_success = NULL;
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool)));
CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool),
cudaMemcpyHostToDevice));
KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success);
CheckCudaError();
CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool),
cudaMemcpyDeviceToHost));
MemoryPool::Ins().Free(dev_success);
MemoryPool::Ins().Free(m);
cudaDeviceSynchronize();
cudaPrintfDisplay(stdout, true);
return success;
}
constexpr int MAX_BLOCK_POWER = 100;
MemoryPool& MemoryPool::Ins() {
static MemoryPool *p;
if (p == NULL) {
p = new MemoryPool;
p->free_blocks_.resize(MAX_BLOCK_POWER + 1);
p->busy_blocks_.reserve(10000);
}
return *p;
}
void appendFreeBlock(const MemoryBlock &memory_block,
vector<map<void*, MemoryBlock>> &free_blocks,
int i,
const unordered_map<void*, MemoryBlock> &busy_blocks) {
if (memory_block.size != (1 << i)) {
cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i <<
endl;
abort();
}
free_blocks.at(i).insert(make_pair(memory_block.p, memory_block));
}
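// Device allocation: DEVICE_MEMORY == 0 delegates to cnmem, == 1 calls
// cudaMalloc directly, and otherwise a buddy-style pool is used: requests are
// rounded up to the next power of two, a larger free block is split when no
// block of the right size exists, and cudaMalloc is only called when nothing
// can be split.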
cudaError_t MemoryPool::Malloc(void **p, int size) {
assert(*p == NULL);
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("Malloc");
#if DEVICE_MEMORY == 0
CallCnmem(cnmemMalloc(p, size, NULL));
profiler.EndEvent();
return cudaSuccess;
#elif DEVICE_MEMORY == 1
cudaError_t r = cudaMalloc(p, size);
profiler.EndEvent();
return r;
#else
int fit_size = 1;
int n = 0;
while (fit_size < size) {
fit_size <<= 1;
++n;
}
cudaError_t status = cudaErrorMemoryAllocation;
while (status != cudaSuccess) {
if (free_blocks_.at(n).empty()) {
int higher_power = n + 1;
while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) {
++higher_power;
}
if (higher_power > MAX_BLOCK_POWER) {
while (status != cudaSuccess) {
status = cudaMalloc(p, fit_size);
}
CallCuda(status);
MemoryBlock block(*p, fit_size);
busy_blocks_.insert(std::make_pair(*p, block));
} else {
auto &v = free_blocks_.at(higher_power);
MemoryBlock &to_split = v.rbegin()->second;
int half_size = to_split.size >> 1;
void *half_address = static_cast<void*>(static_cast<char*>(to_split.p) +
half_size);
MemoryBlock low_block(to_split.p, half_size, to_split.buddy),
high_block(half_address, half_size, to_split.p);
v.erase(v.rbegin()->first);
appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_);
appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_);
}
} else {
status = cudaSuccess;
MemoryBlock &block = free_blocks_.at(n).rbegin()->second;
*p = block.p;
busy_blocks_.insert(std::make_pair(block.p, block));
free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first);
}
}
profiler.EndEvent();
return status;
#endif
}
std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a,
const MemoryBlock &b) {
if (a.size != b.size) {
cerr << "a.size is not equal to b.size" << endl;
abort();
}
int distance = static_cast<char*>(a.p) - static_cast<char*>(b.p);
if (distance == 0) {
cerr << "block a and b has the same address" << endl;
abort();
}
const MemoryBlock &low = distance > 0 ? b : a;
const MemoryBlock &high = distance > 0 ? a : b;
return std::make_pair(&low, &high);
}
bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) {
if (a.size != b.size) {
return false;
}
auto pair = lowerAndhigherBlocks(a, b);
return pair.second->buddy == pair.first->p &&
((char*)pair.second->p - (char*)pair.first->p) == a.size;
}
MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) {
if (a.size != b.size) {
cerr << "sizes of memory blocks to merge not equal" << endl;
abort();
}
auto pair = lowerAndhigherBlocks(a, b);
if ((char*)pair.second->p - (char*)pair.first->p != a.size ||
(a.p != b.buddy && a.buddy != b.p)) {
cerr << "a and b are not buddies" << endl;
cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl;
abort();
}
MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy);
return block;
}
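// Returns a block to the free lists, repeatedly merging it with its buddy
// (when the buddy is also free) so that larger blocks become available again.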
void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks,
int power,
const unordered_map<void*, MemoryBlock> &busy_blocks) {
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("returnFreeBlock");
MemoryBlock current_block = block;
for (int i = power; i <= MAX_BLOCK_POWER; ++i) {
map<void*, MemoryBlock> &v = free_blocks.at(i);
void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ?
current_block.buddy : (void*)((char*)current_block.p + current_block.size);
auto it = v.find(free_p);
if (it == v.end() || (it->second.p != current_block.buddy &&
it->second.buddy != current_block.p)) {
appendFreeBlock(current_block, free_blocks, i, busy_blocks);
break;
} else {
MemoryBlock merged_block = mergeBlocks(it->second, current_block);
current_block = merged_block;
v.erase(it);
}
}
profiler.EndEvent();
}
cudaError_t MemoryPool::Free(void *p) {
Profiler &profiler = Profiler::Ins();
profiler.BeginEvent("Free");
#if DEVICE_MEMORY == 0
    CallCnmem(cnmemFree(p, NULL));
    profiler.EndEvent();
    return cudaSuccess;
#elif DEVICE_MEMORY == 1
cudaError_t r = cudaFree(p);
profiler.EndEvent();
return r;
#else
auto it = busy_blocks_.find(p);
if (it == busy_blocks_.end()) {
cerr << "cannot find busy block " << p << endl;
abort();
}
int size = it->second.size;
int n = 0;
while (size > 1) {
size >>= 1;
++n;
}
if (it->second.size != (1 << n)) {
cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl;
abort();
}
auto block = it->second;
busy_blocks_.erase(it);
returnFreeBlock(block, free_blocks_, n, busy_blocks_);
it = busy_blocks_.find(p);
if (it != busy_blocks_.end()) {
cerr << "can find erased block " << p << endl;
abort();
}
profiler.EndEvent();
if (busy_blocks_.find(p) != busy_blocks_.end()) {
cerr << boost::format("Malloc - find freed p in busy blocks") << endl;
}
return cudaSuccess;
#endif
}
void Profiler::EndCudaEvent() {
//cudaDeviceSynchronize();
EndEvent();
}
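// Computes lty = ly * f'(.) elementwise for the uni-node backward pass;
// TANH/SIGMOID derivatives are evaluated on the output y, while the ReLU
// family uses the pre-activation ty (SELU uses both ty and y).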
__global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated,
const dtype *const*ly,
const dtype *ty,
const dtype *y,
dtype *lty,
int count,
int dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = count * dim;
for (int i = index; i < len; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype yi = y[i];
dtype lyv = ly[count_i][dim_i];
if (activated == ActivatedEnum::TANH) {
lty[i] = lyv * cuda_dtanh(yi);
} else if (activated == ActivatedEnum::SIGMOID) {
lty[i] = lyv * cuda_dsigmoid(yi);
} else if (activated == ActivatedEnum::RELU) {
lty[i] = lyv * cuda_drelu(ty[i]);
} else if (activated == ActivatedEnum::LEAKY_RELU) {
lty[i] = lyv * cuda_dleaky_relu(ty[i]);
} else if (activated == ActivatedEnum::SELU) {
lty[i] = lyv * cuda_dselu(ty[i], yi);
} else {
printf("KernelCalculateLtyForUniBackward error\n");
}
}
}
void CalculateLtyForUniBackward(ActivatedEnum activated,
const std::vector<dtype*> &ly,
const dtype *ty,
const dtype *y,
dtype *lty,
int count,
int dim) {
NumberPointerArray ly_arr;
ly_arr.init((dtype**)ly.data(), ly.size());
int block_count = std::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB);
KernelCalculateLtyForUniBackward<<<block_count, TPB>>>(activated,
ly_arr.value, ty, y, lty, count, dim);
CheckCudaError();
cudaDeviceSynchronize();
}
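// One kernel handles both gradients of the uni-node backward pass:
// blockIdx.x < out_dim columns reduce lty over the batch (shared-memory
// reduction per block, then a cross-block sum guarded by global_block_count)
// to accumulate the bias gradient, while the remaining in_dim columns
// atomically add lx into the per-sample input losses.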
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward(
const dtype *lty,
const dtype *lx,
dtype *b,
dtype **losses,
int count,
int out_dim,
int in_dim,
dtype *block_sums,
int *global_block_count,
bool use_b) {
__shared__ volatile dtype shared_arr[TPB];
int count_i = blockIdx.y * blockDim.x + threadIdx.x;
int dim_i = blockIdx.x;
if (dim_i < out_dim) {
if (use_b) {
if (threadIdx.x == 0 && blockIdx.y == 0) {
global_block_count[dim_i] = 0;
}
int lty_index = count_i * out_dim + dim_i;
shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
__syncthreads();
for (int i = (TPB >> 1); i > 0; i>>=1) {
if (threadIdx.x < i) {
shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
block_sums[gridDim.y * blockIdx.x + blockIdx.y] =
shared_arr[0];
if (atomicAdd(global_block_count + dim_i, 1) ==
gridDim.y - 1) {
dtype sum = 0.0;
for (int i = 0; i < gridDim.y; ++i) {
sum += block_sums[gridDim.y * blockIdx.x + i];
}
DeviceAtomicAdd(b + dim_i, sum);
}
}
}
} else {
if (count_i < count) {
dim_i -= out_dim;
int lx_index = dim_i + count_i * in_dim;
DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]);
}
}
}
void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty,
const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count,
int out_dim, int in_dim, bool use_b) {
int block_y = (count - 1 + TPB) / TPB;
dim3 block_dim(out_dim + in_dim, block_y, 1);
NumberPointerArray loss_arr;
loss_arr.init(losses.data(), count);
Tensor1D block_sums;
block_sums.init(block_y * out_dim);
IntArray global_block_count_arr;
global_block_count_arr.init(out_dim);
KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward<<<block_dim,
TPB>>>(lty, lx, b, loss_arr.value, count, out_dim, in_dim,
block_sums.value, global_block_count_arr.value, use_b);
CheckCudaError();
}
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward(
const dtype *lty,
const dtype *lx1,
const dtype *lx2,
dtype *b,
dtype **losses1,
dtype **losses2,
int count,
int out_dim,
int in_dim1,
int in_dim2,
bool use_b,
dtype *block_sums,
int *global_block_count) {
__shared__ volatile dtype shared_arr[TPB];
int count_i = blockIdx.y * blockDim.x + threadIdx.x;
int dim_i = blockIdx.x;
if (dim_i < out_dim) {
if (threadIdx.x == 0 && blockIdx.y == 0) {
global_block_count[dim_i] = 0;
}
//int lty_index = dim_i * count + count_i;
int lty_index = dim_i + count_i * out_dim;
shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
__syncthreads();
for (int i = (TPB >> 1); i > 0; i>>=1) {
if (threadIdx.x < i) {
shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0];
if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) {
dtype sum = 0.0;
for (int i = 0; i < gridDim.y; ++i) {
sum += block_sums[gridDim.y * blockIdx.x + i];
}
if (use_b) {
DeviceAtomicAdd(b + dim_i, sum);
}
}
}
} else if (dim_i < out_dim + in_dim1) {
if (count_i < count) {
dim_i -= out_dim;
int lx_index = dim_i + count_i * in_dim1;
DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]);
}
} else {
if (count_i < count) {
dim_i -= (out_dim + in_dim1);
int lx_index = dim_i + count_i * in_dim2;
DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]);
}
}
}
void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty,
const dtype *lx1,
const dtype *lx2,
dtype *b,
std::vector<dtype*> &losses1,
std::vector<dtype*> &losses2,
int count,
int out_dim,
int in_dim1,
int in_dim2,
bool use_b) {
int block_y = (count - 1 + TPB) / TPB;
dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1);
NumberPointerArray loss1_arr;
loss1_arr.init(losses1.data(), count);
NumberPointerArray loss2_arr;
loss2_arr.init(losses2.data(), count);
Tensor1D block_sums;
block_sums.init(block_y * out_dim);
IntArray global_block_count_arr;
global_block_count_arr.init(out_dim);
KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward<<<block_dim,
TPB>>>(lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count,
out_dim, in_dim1, in_dim2, use_b, block_sums.value,
global_block_count_arr.value);
CheckCudaError();
}
constexpr int MAX_BATCH_COUNT = 1000000;
__global__ void KernelInitCurandStates(curandState_t *states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int i = index; i < MAX_BATCH_COUNT; i += step) {
curand_init(0, i, 0, &states[i]);
}
}
curandState_t *GetCurandStates() {
static curandState_t *states;
if (states == NULL) {
MemoryPool &pool = MemoryPool::Ins();
CallCuda(pool.Malloc((void**)&states, sizeof(curandState_t) *
MAX_BATCH_COUNT));
KernelInitCurandStates<<<BLOCK_COUNT, TPB>>>( states);
CheckCudaError();
}
return states;
}
curandGenerator_t &GetGenerator() {
static curandGenerator_t gen;
static bool init;
if (!init) {
CallCurand(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
CallCurand(curandSetPseudoRandomGeneratorSeed(gen, 0));
init = true;
}
return gen;
}
void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) {
curandGenerator_t &gen = GetGenerator();
CallCurand(curandGenerateUniform(gen, mask, count * dim));
}
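// Concatenation: for each output element, scan the in_dims prefix sums to
// locate the source input vector and its local offset, then copy the value
// (forward) or atomically accumulate the loss back into it (backward, below).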
__global__ void KernelConcatForward(dtype **ins, int *in_dims,
dtype **outs,
int count,
int in_count,
int out_dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < out_dim * count; i += step) {
int out_dim_i = i % out_dim;
int count_i = i / out_dim;
int in_dim_sum = 0;
int last_in_dim_sum;
int offset_j = 0;
for (int j = 0; j < in_count; ++j) {
last_in_dim_sum = in_dim_sum;
in_dim_sum += in_dims[j];
offset_j = j;
if (out_dim_i < in_dim_sum) {
break;
}
}
int in_dim_i = out_dim_i - last_in_dim_sum;
dtype v = ins[count_i * in_count + offset_j][in_dim_i];
outs[count_i][out_dim_i] = v;
}
}
void ConcatForward(const std::vector<dtype*> &in_vals,
const std::vector<int> &in_dims,
std::vector<dtype*> &vals,
int count,
int in_count,
int out_dim) {
int len = count * out_dim;
int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
NumberPointerArray in_val_arr, val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_dim_arr;
in_dim_arr.init((int*)in_dims.data(), in_dims.size());
KernelConcatForward<<<block_count, TPB>>>(in_val_arr.value,
in_dim_arr.value, val_arr.value, count, in_count, out_dim);
CheckCudaError();
}
__global__ void KernelConcatBackward(dtype** in_losses, int *in_dims,
dtype **out_losses,
int count,
int in_count,
int out_dim) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < out_dim * count; i += step) {
int out_dim_i = i % out_dim;
int count_i = i / out_dim;
int in_dim_sum = 0;
int last_in_dim_sum;
int offset_j = 0;
for (int j = 0; j < in_count; ++j) {
last_in_dim_sum = in_dim_sum;
in_dim_sum += in_dims[j];
offset_j = j;
if (out_dim_i < in_dim_sum) {
break;
}
}
int in_dim_i = out_dim_i - last_in_dim_sum;
DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] +
in_dim_i, out_losses[count_i][out_dim_i]);
}
}
void ConcatBackward(const std::vector<dtype*> &in_losses,
const std::vector<int> &in_dims,
std::vector<dtype*> &losses,
int count,
int in_count,
int out_dim) {
int len = count * out_dim;
int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
NumberPointerArray in_loss_arr, loss_arr;
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray in_dim_arr;
in_dim_arr.init((int*)in_dims.data(), in_dims.size());
KernelConcatBackward<<<block_count, TPB>>>(in_loss_arr.value,
in_dim_arr.value, loss_arr.value, count, in_count, out_dim);
CheckCudaError();
}
__global__ void KernelMemset(dtype *p, int len, dtype value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i+= step) {
p[i] = value;
}
}
void Memset(dtype *p, int len, dtype value) {
int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
KernelMemset<<<block_count, TPB>>>(p, len, value);
CheckCudaError();
}
__global__ void KernelMemset(bool *p, int len, bool value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i+= step) {
p[i] = value;
}
}
void Memset(bool *p, int len, bool value) {
int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
KernelMemset<<<block_count, TPB>>>(p, len, value);
CheckCudaError();
}
void *Malloc(int size) {
void *p;
CallCuda(cudaMalloc(&p, size));
return p;
}
__global__ void KernelBatchMemset(dtype **p, int count, int dim, dtype value) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count ; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
p[count_i][dim_i] = value;
}
}
void BatchMemset(const std::vector<dtype*> &vec, int count, int dim,
dtype value) {
    int block_count = (count * dim - 1 + TPB) / TPB;
block_count = std::min(block_count, BLOCK_COUNT);
NumberPointerArray vec_arr;
vec_arr.init((dtype**)vec.data(), vec.size());
KernelBatchMemset<<<block_count, TPB>>>(vec_arr.value, count, dim, value);
CheckCudaError();
}
__global__ void KernelLookupForward(const int *xids, const dtype *vocabulary,
int count,
int dim,
dtype **vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int xid = xids[count_i];
if (xid >= 0) {
int voc_i = xid * dim + dim_i;
vals[count_i][dim_i] = vocabulary[voc_i];
} else {
vals[count_i][dim_i] = 0.0f;
}
}
}
void LookupForward(const std::vector<int> &xids, const dtype *vocabulary,
int count,
int dim,
std::vector<dtype*> &vals) {
int block_count = std::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB);
IntArray xid_arr;
xid_arr.init((int*)xids.data(), xids.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
KernelLookupForward<<<block_count, TPB>>>(xid_arr.value, vocabulary,
count, dim, const_cast<dtype**>(val_arr.value));
CheckCudaError();
}
__global__ void KernelLookupBackward(const int *xids, int unknown_id,
bool fine_tune,
const dtype** losses,
int count,
int dim,
dtype *grad,
bool *indexers) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int xid = xids[count_i];
if (xid == unknown_id || fine_tune) {
assert(xid >= 0);
if (dim_i == 0) {
indexers[xid] = true;
}
DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]);
}
}
}
void LookupBackward(const std::vector<int> &xids, int unknown_id,
bool fine_tune,
const std::vector<dtype*> &losses,
int count,
int dim,
dtype *grad,
bool *indexers) {
int block_count = std::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT);
    IntArray xid_arr;
    xid_arr.init((int*)xids.data(), xids.size());
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
KernelLookupBackward<<<block_count, TPB>>>(
const_cast<const int *>(xid_arr.value),
unknown_id,
fine_tune,
const_cast<const dtype**>(loss_arr.value),
count,
dim,
grad,
indexers);
CheckCudaError();
}
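// Max (or min) pooling: one block per (dim_i, batch_i) pair; each thread
// loads one input vector's value for that dimension into shared memory, a
// tree reduction keeps the winning value together with its input index, and
// the index is recorded in hit_inputs for the backward pass.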
__global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins,
int *in_counts, int max_in_count, dtype **outs, int count, int dim,
int* hit_inputs) {
__shared__ volatile extern dtype pool_shared_arr[];
volatile dtype* shared_indexers = pool_shared_arr + blockDim.x;
int batch_i = blockIdx.y;
int in_count = in_counts[batch_i];
int in_count_i = threadIdx.x;
int dim_i = blockIdx.x;
if (in_count_i < in_count) {
pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count +
in_count_i][dim_i];
} else {
pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ?
-INFINITY : INFINITY;
}
shared_indexers[threadIdx.x] = threadIdx.x;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
if (pooling == PoolingEnum::MAX) {
if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) {
pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
shared_indexers[threadIdx.x] = shared_indexers[plus_i];
}
} else {
if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) {
pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
shared_indexers[threadIdx.x] = shared_indexers[plus_i];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
hit_inputs[batch_i * dim + dim_i] = shared_indexers[0];
outs[batch_i][dim_i] = pool_shared_arr[0];
}
}
void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
std::vector<dtype*> &vals,
int count,
const std::vector<int> &in_counts,
int dim,
int *hit_inputs) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
KernelPoolForward<<<block_dim, thread_count, thread_count * 2 *
sizeof(dtype)>>>(pooling, in_val_arr.value, in_count_arr.value,
max_in_count, val_arr.value, count, dim, hit_inputs);
CheckCudaError();
}
__global__ void KernelPoolBackward(const dtype ** losses,
const int *hit_inputs,
int max_in_count,
int count,
int dim,
dtype **in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int input_i = hit_inputs[i];
dtype loss = losses[count_i][dim_i];
DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i,
loss);
}
}
void PoolBackward(const std::vector<dtype*> &losses,
std::vector<dtype*> &in_losses,
const std::vector<int> &in_counts,
const int *hit_inputs,
int count,
int dim) {
NumberPointerArray loss_arr, in_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int block_count = (count * dim - 1 + TPB) / TPB;
block_count = std::min(block_count, BLOCK_COUNT);
KernelPoolBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value,
hit_inputs,
max_in_count,
count,
dim,
in_loss_arr.value);
CheckCudaError();
}
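// Sum/average pooling over the inputs of each batch element; the
// shared-memory reduction sums the selected dimension and the AVG case
// divides by the input count.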
__global__ void KernelSumPoolForward(PoolingEnum pooling,
const dtype **in_vals,
int count,
int dim,
const int *in_counts,
int max_in_count,
dtype **vals) {
__shared__ volatile extern dtype pool_shared_arr[];
int batch_i = blockIdx.y;
int in_count = in_counts[batch_i];
int in_count_i = threadIdx.x;
int dim_i = blockIdx.x;
if (in_count_i < in_count) {
pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count +
in_count_i][dim_i];
} else {
pool_shared_arr[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ?
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i];
}
}
void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
int count,
int dim,
const std::vector<int> &in_counts,
std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_val_arr;
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
KernelSumPoolForward<<<block_dim, thread_count,
thread_count * sizeof(dtype)>>>(pooling,
(const dtype**)in_val_arr.value, count, dim,
(const int*)in_count_arr.value, max_in_count, val_arr.value);
CheckCudaError();
}
__global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **in_losses) {
int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y;
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
if (blockIdx.y < in_counts[blockIdx.x]) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, pooling == PoolingEnum::SUM ?
losses[blockIdx.x][i] : losses[blockIdx.x][i] / in_counts[blockIdx.x]);
}
}
}
void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses) {
int thread_count = 8;
while (thread_count < dim) {
thread_count <<= 1;
}
thread_count = std::min(TPB, thread_count);
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
dim3 block_dim(count, max_in_count, 1);
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
NumberPointerArray in_loss_arr;
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
KernelSumBackward<<<block_dim, thread_count>>>(pooling,
(const dtype**)loss_arr.value, (const int*)in_count_arr.value,
max_in_count, count, dim, in_loss_arr.value);
CheckCudaError();
}
//__global_ void KernelCalculateNormalizedForAttention(const dtype** unnormeds, const int *in_counts,
// int max_in_count,
// int count,
// dtype** normalized_scalars) {
// __shared__ volatile extern dtype shared_arr[];
// int in_count = in_counts[blockIdx.x];
// int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
// dtype exped_value = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f;
// shared_arr[threadIdx.x] = exped_value;
// for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
// if (threadIdx.x < i) {
// int plus_i = threadIdx.x + i;
// shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
// }
// __syncthreads();
// }
// if (threadIdx.x < in_count) {
// normalized_scalars[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
// }
//}
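// Scalar attention forward: for every batch element a softmax over the
// unnormed scalar scores (one score per input) is computed in shared memory,
// the resulting mask is stored per output dimension, and the block then
// reduces the mask-weighted inputs into the pooled value.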
__global__ void KernelScalarAttentionForward(const dtype** ins,
const dtype **unnormeds,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **masks,
dtype **vals) {
__shared__ volatile extern dtype attention_shared_arr[];
volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
int count_i = blockIdx.y;
int in_count = in_counts[count_i];
int dim_i = blockIdx.x;
int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
dtype unnormed_mask = threadIdx.x < in_count ?
cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f;
attention_shared_arr[threadIdx.x] = unnormed_mask;
shared_unnormed_masks[threadIdx.x] = unnormed_mask;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] /
attention_shared_arr[0] : 0.0f;
if (threadIdx.x < in_count) {
masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
}
dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ?
mask * in : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
}
}
void ScalarAttentionForward(const std::vector<dtype*> &ins,
const std::vector<dtype*> &unnormeds,
const std::vector<int> &in_counts, int count, int dim,
std::vector<dtype*> &masks, std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_arr;
in_arr.init((dtype**)ins.data(), ins.size());
NumberPointerArray unnormed_arr;
unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
NumberPointerArray mask_arr;
mask_arr.init((dtype**)masks.data(), masks.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
KernelScalarAttentionForward<<<block_dim, thread_count, 2 * thread_count *
sizeof(dtype)>>>((const dtype**)in_arr.value,
(const dtype**)unnormed_arr.value,
(const int*)in_count_arr.value,
max_in_count, count, dim, mask_arr.value, val_arr.value);
CheckCudaError();
}
__global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses,
const dtype **in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype *mask_losses,
dtype **in_losses) {
// blockIdx.x : in_count_i
// blockIdx.y : count_i
// threadIdx.x : dim_i
__shared__ extern volatile dtype att_mask_loss_shared_arr[];
int in_count = in_counts[blockIdx.y];
int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
if (in_count <= blockIdx.x) {
return;
}
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] *
masks[blockIdx.y][max_in_count * threadIdx.x + blockIdx.x]);
}
att_mask_loss_shared_arr[threadIdx.x] = 0.0f;
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] *
in_vals[global_in_count_i][i];
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
att_mask_loss_shared_arr[threadIdx.x] +=
att_mask_loss_shared_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0];
}
}
void ScalarAttentionMaskAndInLoss(const dtype** losses,
const dtype** in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype *mask_losses,
dtype **in_losses) {
dim3 block_dim(max_in_count, count, 1);
int thread_count = 8;
if (dim >= TPB) {
thread_count = TPB;
} else {
while (dim > thread_count) {
thread_count <<= 1;
}
}
KernelScalarAttentionMaskAndInLoss<<<block_dim, thread_count,
thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts,
max_in_count, count, dim, mask_losses, in_losses);
CheckCudaError();
}
__global__ void KernelScalarAttentionBackward(const dtype** masks,
const dtype *mask_losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **unnormed_losses) {
__shared__ volatile extern dtype shared_att_bckwrd_arr[];
int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
int in_count = in_counts[blockIdx.x];
if (threadIdx.x < in_count && blockIdx.y == 0) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i],
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[global_in_count_i]);
}
shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ?
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[global_in_count_i] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_att_bckwrd_arr[threadIdx.x] +=
shared_att_bckwrd_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x < in_count && blockIdx.y == 0) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i],
-shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]);
}
}
void ScalarAttentionBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals,
const std::vector<dtype*> &masks,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses,
std::vector<dtype*> &unnormed_losses) {
NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr,
in_val_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
mask_arr.init((dtype**)masks.data(), masks.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
unnormed_loss_arr.init((dtype**)unnormed_losses.data(),
unnormed_losses.size());
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
NumberArray mask_loss_arr;
mask_loss_arr.init(count * max_in_count);
ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value,
(const dtype**)in_val_arr.value, (const dtype**)mask_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
mask_loss_arr.value, in_loss_arr.value);
dim3 block_dim(count, dim, 1);
int thread_count = 8;
while (thread_count < max_in_count) {
thread_count <<= 1;
}
KernelScalarAttentionBackward<<<block_dim, thread_count,
thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value,
(const dtype*)mask_loss_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
unnormed_loss_arr.value);
CheckCudaError();
}
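// Vector attention: identical structure to the scalar variant above, except
// that each output dimension has its own unnormed score
// (unnormeds[...][blockIdx.x]) and therefore its own softmax mask.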
__global__ void KernelVectorAttentionForward(const dtype** ins,
const dtype **unnormeds,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **masks,
dtype **vals) {
__shared__ volatile extern dtype attention_shared_arr[];
volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
int count_i = blockIdx.y;
int in_count = in_counts[count_i];
int dim_i = blockIdx.x;
int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
dtype unnormed_mask = threadIdx.x < in_count ?
cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f;
attention_shared_arr[threadIdx.x] = unnormed_mask;
shared_unnormed_masks[threadIdx.x] = unnormed_mask;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] /
attention_shared_arr[0] : 0.0f;
if (threadIdx.x < in_count) {
masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
}
dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ?
mask * in : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
int plus_i = threadIdx.x + i;
attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
}
}
void VectorAttentionForward(const std::vector<dtype*> &ins,
const std::vector<dtype*> &unnormeds,
const std::vector<int> &in_counts, int count, int dim,
std::vector<dtype*> &masks, std::vector<dtype*> &vals) {
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
int thread_count = 8;
while (max_in_count > thread_count) {
thread_count <<= 1;
}
dim3 block_dim(dim, count, 1);
NumberPointerArray in_arr;
in_arr.init((dtype**)ins.data(), ins.size());
NumberPointerArray unnormed_arr;
unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
NumberPointerArray mask_arr;
mask_arr.init((dtype**)masks.data(), masks.size());
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
KernelVectorAttentionForward<<<block_dim, thread_count, 2 * thread_count *
sizeof(dtype)>>>((const dtype**)in_arr.value,
(const dtype**)unnormed_arr.value,
(const int*)in_count_arr.value,
max_in_count, count, dim, mask_arr.value, val_arr.value);
CheckCudaError();
}
__global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses,
const dtype **in_vals,
const dtype **masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **mask_losses,
dtype **in_losses) {
// blockIdx.x : in_count_i
// blockIdx.y : count_i
// threadIdx.x : dim_i
int in_count = in_counts[blockIdx.y];
int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
if (in_count <= blockIdx.x) {
return;
}
for (int i = threadIdx.x; i < dim; i += blockDim.x) {
DeviceAtomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] *
masks[blockIdx.y][max_in_count * i + blockIdx.x]);
mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] =
losses[blockIdx.y][i] * in_vals[global_in_count_i][i];
}
}
void VectorAttentionMaskAndInLoss(const dtype** losses,
const dtype** in_vals,
const dtype** masks,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **mask_losses,
dtype **in_losses) {
dim3 block_dim(max_in_count, count, 1);
int thread_count = 8;
if (dim >= TPB) {
thread_count = TPB;
} else {
while (dim > thread_count) {
thread_count <<= 1;
}
}
KernelVectorAttentionMaskAndInLoss<<<block_dim, thread_count,
thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts,
max_in_count, count, dim, mask_losses, in_losses);
CheckCudaError();
}
__global__ void KernelVectorAttentionBackward(const dtype** masks,
const dtype **mask_losses,
const int *in_counts,
int max_in_count,
int count,
int dim,
dtype **unnormed_losses) {
__shared__ volatile extern dtype shared_att_bckwrd_arr[];
int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
int in_count = in_counts[blockIdx.x];
if (threadIdx.x < in_count) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y,
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[blockIdx.x][blockIdx.y * max_in_count +
threadIdx.x]);
}
shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ?
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] :
0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_att_bckwrd_arr[threadIdx.x] +=
shared_att_bckwrd_arr[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x < in_count) {
DeviceAtomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y,
-shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y *
max_in_count + threadIdx.x]);
}
}
void VectorAttentionBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals,
const std::vector<dtype*> &masks,
const std::vector<int> &in_counts,
int count,
int dim,
std::vector<dtype*> &in_losses,
std::vector<dtype*> &unnormed_losses) {
NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr,
in_val_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
mask_arr.init((dtype**)masks.data(), masks.size());
in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
unnormed_loss_arr.init((dtype**)unnormed_losses.data(),
unnormed_losses.size());
in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
IntArray in_count_arr;
in_count_arr.init((int*)in_counts.data(), in_counts.size());
int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
std::vector<std::shared_ptr<NumberArray>> mask_losses;
mask_losses.reserve(count);
for (int i = 0; i < count; ++i) {
std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>();
p->init(max_in_count * dim);
mask_losses.push_back(p);
}
std::vector<dtype*> raw_mask_losses;
raw_mask_losses.reserve(count);
for (auto &p : mask_losses) {
raw_mask_losses.push_back(p->value);
}
NumberPointerArray mask_loss_arr;
mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size());
VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value,
(const dtype**)in_val_arr.value, (const dtype**)mask_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
mask_loss_arr.value, in_loss_arr.value);
dim3 block_dim(count, dim, 1);
int thread_count = 8;
while (thread_count < max_in_count) {
thread_count <<= 1;
}
KernelVectorAttentionBackward<<<block_dim, thread_count,
thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value,
(const dtype**)mask_loss_arr.value,
(const int*)in_count_arr.value, max_in_count, count, dim,
unnormed_loss_arr.value);
CheckCudaError();
}
__global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2,
int count,
int dim,
dtype** vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i];
}
}
void PMultiForward(const std::vector<dtype*> &ins1,
const std::vector<dtype*> &ins2,
int count,
int dim,
std::vector<dtype*> &vals) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray ins1_arr, ins2_arr, vals_arr;
ins1_arr.init((dtype**)ins1.data(), count);
ins2_arr.init((dtype**)ins2.data(), count);
vals_arr.init((dtype**)vals.data(), count);
KernelPMultiForward<<<block_count, TPB>>>((const dtype**)ins1_arr.value,
(const dtype**)ins2_arr.value, count, dim, vals_arr.value);
CheckCudaError();
}
__global__ void KernelDivForward(const dtype *const *numerators, const dtype *const *denominators,
int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = numerators[count_i][dim_i] / denominators[count_i][0];
}
}
void DivForwartd(const vector<const dtype*> numerators, const vector<const dtype*> denominators,
int count,
int dim,
vector<dtype*> &results) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray numerator_arr, denominator_arr, result_arr;
numerator_arr.init((dtype**)numerators.data(), count);
denominator_arr.init((dtype**)denominators.data(), count);
result_arr.init((dtype**)results.data(), count);
KernelDivForward<<<block_count, TPB>>>(numerator_arr.value, denominator_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelDivNumeratorBackward(const dtype *const *losses,
const dtype *const *denominator_vals,
int count,
int dim,
dtype *const *numerator_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] /
denominator_vals[count_i][0]);
}
}
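/* Two-stage reduction for the denominator gradient: each block along gridDim.y reduces its slice
 * of the dim dimension into block_sums and bumps an atomic per-row counter; the last block to
 * finish for a given row gathers the partial sums and accumulates
 * -sum_j(loss[j] * numerator[j] / denominator^2) into the denominator loss. */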
__global__ void KernelDivDenominatorBackward(const dtype *const *losses,
const dtype *const *numerator_vals,
const dtype *const *denominator_vals,
int count,
int dim,
dtype *block_sums,
int *block_counters,
dtype *const *denominator_losses) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
__shared__ volatile dtype square;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
int count_i = blockIdx.x;
if (threadIdx.x == 0) {
is_last_block = false;
square = denominator_vals[count_i][0] * denominator_vals[count_i][0];
}
__syncthreads();
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? losses[count_i][offset] *
numerator_vals[count_i][offset] / square : 0.0f;
__syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
        for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
            if (threadIdx.x < i) {
                shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
            }
            __syncthreads();
        }
if (threadIdx.x == 0) {
DeviceAtomicAdd(denominator_losses[count_i], -shared_sum[0]);
}
}
}
void DivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals,
const vector<const dtype*> &numerator_vals,
int count,
int dim,
vector<dtype*> &numerator_losses,
vector<dtype*> &denominator_losses) {
NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr,
denominator_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size());
numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size());
numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size());
denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size());
int block_count = DefaultBlockCount(count * dim);
KernelDivNumeratorBackward<<<block_count, TPB>>>(loss_arr.value, denominator_val_arr.value,
count,
dim,
numerator_loss_arr.value);
CheckCudaError();
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
KernelDivDenominatorBackward<<<block_dim , thread_count>>>(loss_arr.value,
numerator_val_arr.value, denominator_val_arr.value, count, dim, block_sums.value,
block_counters.value, denominator_loss_arr.value);
CheckCudaError();
}
__global__ void KernelSplitForward(const dtype *const *inputs, const int *offsets, int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int offset = offsets[count_i];
results[count_i][dim_i] = inputs[count_i][offset + dim_i];
}
}
void SplitForward(const vector<const dtype*> &inputs, const vector<int> &offsets, int count,
int dim,
vector<dtype*> &results) {
NumberPointerArray input_arr, result_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
result_arr.init((dtype**)results.data(), results.size());
IntArray offset_arr;
offset_arr.init((int*)offsets.data(), offsets.size());
int block_count = DefaultBlockCount(count * dim);
KernelSplitForward<<<block_count, TPB>>>(input_arr.value, offset_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelSplitBackward(const dtype *const *losses, const int *offsets, int count,
int dim,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
int offset = offsets[count_i];
DeviceAtomicAdd(input_losses[count_i] + offset + dim_i, losses[count_i][dim_i]);
}
}
void SplitBackward(const vector<const dtype*> &losses, const vector<int> offsets, int count,
int dim,
const vector<dtype*> &input_losses) {
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
IntArray offset_arr;
offset_arr.init((int*)offsets.data(), offsets.size());
int block_count = DefaultBlockCount(count * dim);
KernelSplitBackward<<<block_count, TPB>>>(loss_arr.value, offset_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
__global__ void KernelSubForward(const dtype *const *minuend, const dtype *const *subtrahend,
int count,
int dim,
dtype **results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = minuend[count_i][dim_i] - subtrahend[count_i][dim_i];
}
}
void SubForward(const std::vector<const dtype*> &minuend,
const std::vector<const dtype*> &subtrahend,
int count,
int dim,
std::vector<dtype*> &results) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray minuend_arr, subtrahend_arr, result_arr;
minuend_arr.init((dtype**)minuend.data(), count);
subtrahend_arr.init((dtype**)subtrahend.data(), count);
result_arr.init((dtype**)results.data(), count);
KernelSubForward<<<block_count, TPB>>>((const dtype* const*)minuend_arr.value,
(const dtype *const *)subtrahend_arr.value, count, dim, result_arr.value);
CheckCudaError();
}
__global__ void KernelSubBackward(const dtype *const *losses, int count, int dim,
dtype *const *minuend_losses,
dtype *const *subtrahend_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(minuend_losses[count_i] + dim_i, losses[count_i][dim_i]);
DeviceAtomicAdd(subtrahend_losses[count_i] + dim_i, -losses[count_i][dim_i]);
}
}
void SubBackward(const std::vector<const dtype*> &losses, int count, int dim,
std::vector<dtype*> &minuend_losses,
std::vector<dtype*> &subtrahend_losses) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray loss_arr, minuend_loss_arr, subtrahend_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
minuend_loss_arr.init((dtype**)minuend_losses.data(), minuend_losses.size());
subtrahend_loss_arr.init((dtype**)subtrahend_losses.data(), subtrahend_losses.size());
KernelSubBackward<<<block_count, TPB>>>((const dtype *const *)loss_arr.value, count, dim,
(dtype *const *)minuend_loss_arr.value, (dtype *const *)subtrahend_loss_arr.value);
CheckCudaError();
}
__global__ void KernelPMultiBackward(const dtype **losses,
const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype** in_losses1,
dtype** in_losses2) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(in_losses1[count_i] + dim_i,
losses[count_i][dim_i] * in_vals2[count_i][dim_i]);
DeviceAtomicAdd(in_losses2[count_i] + dim_i,
losses[count_i][dim_i] * in_vals1[count_i][dim_i]);
}
}
void PMultiBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals1,
const std::vector<dtype*> &in_vals2,
int count,
int dim,
std::vector<dtype*> &in_losses1,
std::vector<dtype*> &in_losses2) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr,
in_losses2_arr;
losses_arr.init((dtype**)losses.data(), losses.size());
in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size());
in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size());
in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size());
in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size());
KernelPMultiBackward<<<block_count, TPB>>>((const dtype**)losses_arr.value,
(const dtype**)in_vals1_arr.value,
(const dtype**)in_vals2_arr.value, count, dim, in_losses1_arr.value, in_losses2_arr.value);
CheckCudaError();
}
__global__ void KernelPAddForward(const dtype*** ins, int count, int dim,
int in_count,
dtype **vals) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i+= step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype sum = ins[0][count_i][dim_i];
for (int j = 1; j < in_count; ++j) {
sum += ins[j][count_i][dim_i];
}
vals[count_i][dim_i] = sum;
}
}
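/* Per-pair dot product: one block per pair, dynamic shared memory sized by the launcher to the
 * (power-of-two) thread count, and a classic shared-memory tree reduction down to shared_val[0].
 * Note that the launcher below does not cap thread_count at TPB, so dim is assumed to fit in a
 * single block. */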
__global__ void KernelPDotForward(const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype** vals) {
volatile __shared__ extern dtype shared_val[];
if (threadIdx.x < dim) {
shared_val[threadIdx.x] = in_vals1[blockIdx.x][threadIdx.x] *
in_vals2[blockIdx.x][threadIdx.x];
} else {
shared_val[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_val[threadIdx.x] += shared_val[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
vals[blockIdx.x][0] = shared_val[0];
}
}
void PDotForward(const std::vector<dtype*> &ins1,
const std::vector<dtype*> &ins2,
int count,
int dim,
std::vector<dtype*> &vals) {
NumberPointerArray in1_arr, in2_arr, val_arr;
in1_arr.init((dtype**)ins1.data(), ins1.size());
in2_arr.init((dtype**)ins2.data(), ins2.size());
val_arr.init((dtype**)vals.data(), vals.size());
int thread_count = NextTwoIntegerPowerNumber(dim);
KernelPDotForward<<<count, thread_count, thread_count * sizeof(dtype)>>>((
const dtype**)in1_arr.value, (const dtype**)in2_arr.value,
count, dim, val_arr.value);
CheckCudaError();
}
__global__ void KernelPDotBackward(const dtype **losses,
const dtype **in_vals1,
const dtype **in_vals2,
int count,
int dim,
dtype **in_losses1,
dtype **in_losses2) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(in_losses1[count_i] + dim_i,
losses[count_i][0] * in_vals2[count_i][dim_i]);
DeviceAtomicAdd(in_losses2[count_i] + dim_i,
losses[count_i][0] * in_vals1[count_i][dim_i]);
}
}
void PDotBackward(const std::vector<dtype*> &losses,
const std::vector<dtype*> &in_vals1,
const std::vector<dtype*> &in_vals2,
int count,
int dim,
std::vector<dtype*> &in_losses1,
std::vector<dtype*> &in_losses2) {
NumberPointerArray in1_loss_arr, in2_loss_arr, loss_arr, in_val1_arr,
in_val2_arr;
in1_loss_arr.init((dtype**)in_losses1.data(), in_losses1.size());
in2_loss_arr.init((dtype**)in_losses2.data(), in_losses2.size());
loss_arr.init((dtype**)losses.data(), losses.size());
in_val1_arr.init((dtype**)in_vals1.data(), in_vals1.size());
in_val2_arr.init((dtype**)in_vals2.data(), in_vals2.size());
int block_count = DefaultBlockCount(count * dim);
KernelPDotBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value,
(const dtype**)in_val1_arr.value, (const dtype**)in_val2_arr.value,
count, dim, in1_loss_arr.value, in2_loss_arr.value);
CheckCudaError();
}
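/* PAddForward takes a jagged 2-D structure (in_count lists of count vectors), so each inner
 * list's device pointers are first staged in their own NumberPointerArray, and those device
 * addresses are then gathered into a NumberPointerPointerArray so the kernel can index
 * ins[j][count_i][dim_i]. */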
void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count,
int dim,
int in_count,
std::vector<dtype*> &vals) {
std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr;
gpu_addr.reserve(ins.size());
for (const std::vector<dtype*> &x : ins) {
std::shared_ptr<NumberPointerArray> arr =
std::make_shared<NumberPointerArray>();
arr->init((dtype**)x.data(), x.size());
gpu_addr.push_back(arr);
}
std::vector<dtype**> ins_gpu;
ins_gpu.reserve(ins.size());
for (auto &ptr : gpu_addr) {
ins_gpu.push_back(ptr->value);
}
NumberPointerPointerArray in_arr;
in_arr.init(ins_gpu.data(), ins_gpu.size());
NumberPointerArray out_arr;
out_arr.init(vals.data(), vals.size());
int block_count = DefaultBlockCount(count * dim);
KernelPAddForward<<<block_count, TPB>>>((const dtype***)in_arr.value,
count, dim, in_count, out_arr.value);
CheckCudaError();
}
__global__ void KernelPAddBackward(const dtype **losses, int count, int dim,
int in_count,
dtype ***in_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int dim_mul_count = dim * count;
for (int i = index; i < dim_mul_count * in_count; i += step) {
int in_count_i = i / dim_mul_count;
int dim_mul_count_i = i % dim_mul_count;
int count_i = dim_mul_count_i / dim;
int dim_i = dim_mul_count_i % dim;
DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]);
}
}
void PAddBackward(const std::vector<dtype*> &losses, int count, int dim,
int in_count,
std::vector<std::vector<dtype*>> &in_losses) {
std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr;
gpu_addr.reserve(in_losses.size());
for (const std::vector<dtype*> &x : in_losses) {
std::shared_ptr<NumberPointerArray> arr =
std::make_shared<NumberPointerArray>();
arr->init((dtype**)x.data(), x.size());
gpu_addr.push_back(arr);
}
std::vector<dtype**> in_losses_gpu;
in_losses_gpu.reserve(in_losses.size());
for (auto &ptr : gpu_addr) {
in_losses_gpu.push_back(ptr->value);
}
NumberPointerPointerArray in_loss_arr;
in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size());
NumberPointerArray out_loss_arr;
out_loss_arr.init((dtype**)losses.data(), losses.size());
int block_count = DefaultBlockCount(in_count * count * dim);
KernelPAddBackward<<<block_count, TPB>>>((const dtype**)out_loss_arr.value,
count, dim, in_count, in_loss_arr.value);
CheckCudaError();
}
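/* Fused softmax + cross-entropy loss: one block per example (requires dim <= TPB, which the host
 * wrapper checks). The block first finds the argmax via a shared-memory reduction and counts
 * correct predictions, then writes (softmax(x) - one_hot(answer)) / batchsize as the gradient. */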
__global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses,
int *correct_count, int *answers, int batchsize, int count, int dim) {
volatile __shared__ int opt_label;
volatile __shared__ dtype shared_val[TPB];
volatile __shared__ int64_t max_indexes[TPB];
volatile __shared__ dtype scores_sum[TPB];
volatile __shared__ dtype scores[TPB];
int dim_i = threadIdx.x;
int count_i = blockIdx.x;
if (count_i == 0 && dim_i == 0) {
*correct_count = 0;
}
shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -INFINITY;
max_indexes[dim_i] = dim_i;
__syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i && shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) {
            shared_val[threadIdx.x] = shared_val[threadIdx.x + i];
            max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i];
        }
        __syncthreads();
    }
if (threadIdx.x == 0) {
opt_label = max_indexes[0];
if (answers[count_i] == opt_label) {
atomicAdd(correct_count, 1);
}
}
__syncthreads();
dtype max_score = vals[count_i][opt_label];
dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) :
0.0f;
    scores[dim_i] = score;
    scores_sum[dim_i] = score;
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            scores_sum[threadIdx.x] += scores_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
if (dim_i < dim) {
losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] -
(dim_i == answers[count_i] ? 1 : 0)) / batchsize;
}
}
void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses,
int *correct_count,
const std::vector<int> &answers,
int batchsize,
int count,
int dim) {
if (dim > TPB) {
abort();
}
int thread_count = NextTwoIntegerPowerNumber(dim);
NumberPointerArray val_arr;
val_arr.init((dtype**)vals.data(), vals.size());
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
IntArray answer_arr;
answer_arr.init((int*)answers.data(), answers.size());
KernelSoftMaxLoss<<<count, thread_count>>>(
const_cast<const dtype **>(val_arr.value),
const_cast<dtype **>(loss_arr.value),
correct_count,
answer_arr.value,
batchsize,
count,
dim);
CheckCudaError();
}
__global__ void Predict(const dtype *val, int dim, int *result) {
__shared__ volatile dtype shared_vals[TPB];
__shared__ volatile dtype shared_indexes[TPB];
shared_indexes[threadIdx.x] = threadIdx.x;
if (threadIdx.x < dim) {
shared_vals[threadIdx.x] = val[threadIdx.x];
} else {
shared_vals[threadIdx.x] = -10000000.0f;
}
__syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i && shared_vals[threadIdx.x] < shared_vals[threadIdx.x + i]) {
            shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i];
            shared_indexes[threadIdx.x] = shared_indexes[threadIdx.x + i];
        }
        __syncthreads();
    }
if (threadIdx.x == 0) {
*result = shared_indexes[0];
}
}
int Predict(const dtype* val, int dim) {
if (dim > TPB) {
abort();
}
int thread_count = NextTwoIntegerPowerNumber(dim);
DeviceInt result;
result.init();
Predict<<<1, thread_count>>>(val, dim, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
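/* Column-wise max with argmax: the same two-stage block-partials plus atomic-counter scheme as
 * the other reductions, writing per-block maxima to block_maxes and their positions to
 * block_max_is; the last block to finish for a row combines the partials into max_vals and
 * max_indexes. */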
__global__ void KernelMax(const dtype *const *v, int count, int dim, dtype *block_maxes,
int *block_max_is,
int *block_counters,
int *max_indexes,
dtype *max_vals) {
__shared__ volatile dtype shared_max[TPB];
__shared__ volatile dtype shared_max_i[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY;
shared_max_i[threadIdx.x] = offset;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_maxes[block_maxes_offset] = shared_max[0];
block_max_is[block_maxes_offset] = shared_max_i[0];
//if (shared_max_i[0] >= dim) {
// KernelPrintLine("dim:%d shared_max_i[0]:%d shared_max[0]:%f", dim, shared_max_i[0],
// shared_max[0]);
//}
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype max = -INFINITY;
int max_i = 100000;
//if (threadIdx.x == 0) {
// for (int i = 0; i < gridDim.y; ++i) {
// int offset = blockIdx.x * gridDim.y + i;
// KernelPrintLine("i:%d block_maxes[%d]:%f", i, offset, block_maxes[offset]);
// }
//}
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
if (block_maxes[offset] > max) {
max = block_maxes[offset];
max_i = block_max_is[offset];
//if (max_i >= dim) {
// KernelPrintLine("max_i:%d blockIdx.x:%d gridDim.y:%d i:%d offset:%d",
// max_i, blockIdx.x, gridDim.y, i, offset);
//}
}
}
shared_max[threadIdx.x] = max;
shared_max_i[threadIdx.x] = max_i;
//if (max_i >= dim) {
// KernelPrintLine("count_i:%d dim:%d max_i:%d", count_i, dim, max_i);
//}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
//if (shared_max_i[threadIdx.x] >= dim) {
// KernelPrintLine("index:%d v:%f" shared_max_i[threadIdx.x],
// shared_max[threadIdx.x]);
//}
}
__syncthreads();
}
if (threadIdx.x == 0) {
max_vals[count_i] = shared_max[0];
max_indexes[count_i] = shared_max_i[0];
}
}
}
__global__ void KernelSingleMax(const dtype *const *v, int count, int dim,
int *max_indexes,
dtype *max_vals) {
for (int count_i = 0; count_i < count; ++count_i) {
dtype max_val = -INFINITY;
int max_i;
for (int dim_i = 0; dim_i < dim; ++ dim_i) {
if (v[count_i][dim_i] > max_val) {
max_val = v[count_i][dim_i];
max_i = dim_i;
}
}
max_indexes[count_i] = max_i;
max_vals[count_i] = max_val;
}
}
void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_maxes;
block_maxes.init(block_y_count * count);
IntArray block_max_is, block_counters;
block_max_is.init(block_y_count * count);
block_counters.init(count);
KernelMax<<<block_dim, thread_count>>>(v, count, dim, block_maxes.value, block_max_is.value,
block_counters.value, max_indexes, max_vals);
cudaPrintfDisplay(stdout, true);
CheckCudaError();
#if TEST_CUDA
NumberArray max_val_arr;
IntArray max_indexer_arr;
max_val_arr.init(count);
max_indexer_arr.init(count);
KernelSingleMax<<<1, 1>>>(v, count, dim, max_indexer_arr.value, max_val_arr.value);
CheckCudaError();
vector<int> max_indexer_target(count), max_indexer_gold(count);
MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), cudaMemcpyDeviceToHost);
MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int),
cudaMemcpyDeviceToHost);
for (int i = 0; i < count; ++i) {
if (max_indexer_target.at(i) != max_indexer_gold.at(i)) {
cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i)
% max_indexer_gold.at(i) << endl;
PrintNums(v, i, dim);
abort();
}
}
#endif
CheckCudaError();
}
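/* Computes out[i][j] = exp(in[i][j] - number_to_sub[i]); the caller passes the per-row maximum
 * (see SoftMaxLoss below) so the subsequent softmax normalization stays numerically stable. */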
__global__ void KernelExp(const dtype *const *in, int count, int dim, const dtype *number_to_sub,
dtype *const *out) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
out[count_i][dim_i] = cuda_exp(in[count_i][dim_i] - number_to_sub[count_i]);
}
}
void Exp(const dtype *const *in, int count, int dim, const dtype *number_to_sub,
dtype *const *out) {
int block_count = DefaultBlockCount(dim * count);
KernelExp<<<block_count, TPB>>>(in, count, dim, number_to_sub, out);
CheckCudaError();
}
__global__ void KernelExpForward(const dtype* const *inputs, int count, int dim,
dtype *const *results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = cuda_exp(inputs[count_i][dim_i]);
}
}
void ExpForward(const vector<const dtype*> &inputs, int count, int dim, vector<dtype*> &results) {
NumberPointerArray input_arr, result_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
result_arr.init((dtype**)results.data(), results.size());
int block_count = DefaultBlockCount(dim * count);
KernelExpForward<<<block_count, TPB>>>(input_arr.value, count, dim, result_arr.value);
CheckCudaError();
}
__global__ void KernelExpBackward(const dtype* const *losses, const dtype* const *vals,
int count,
int dim,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(input_losses[count_i], losses[count_i][dim_i] * vals[count_i][dim_i]);
}
}
void ExpBackward(const vector<const dtype*> &losses, const vector<const dtype*> &vals, int count,
int dim,
vector<dtype*> input_losses) {
NumberPointerArray loss_arr, val_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
val_arr.init((dtype**)vals.data(), vals.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
int block_count = DefaultBlockCount(dim * count);
KernelExpBackward<<<block_count, TPB>>>(loss_arr.value, val_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
__global__ void KernelSum(const dtype *const *v, int count, int dim, dtype *block_sums,
int *block_counters,
dtype *sum_vals) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
sum_vals[count_i] = shared_sum[0];
}
}
}
void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
KernelSum<<<block_dim, thread_count>>>(v, count, dim, block_sums.value, block_counters.value,
sum_vals);
CheckCudaError();
}
__global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim,
const dtype *const *vals,
const dtype *sums,
const dtype *max_vals,
const int *answers,
dtype reverse_batchsize,
dtype **grads,
dtype *losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < dim * count; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
dtype loss = exps[count_i][dim_i] / sums[count_i];
if (dim_i == answers[count_i]) {
loss -= 1.0f;
}
grads[count_i][dim_i] = loss * reverse_batchsize;
losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i])
* reverse_batchsize;
}
}
void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals,
const dtype *sums,
const dtype *max_vals,
const int *answers,
dtype reverse_batchsize,
dtype **grads,
dtype *losses) {
int block_count = DefaultBlockCount(dim * count);
KernelSoftMaxLossByExp<<<block_count, TPB>>>(exps, count, dim, vals, sums, max_vals, answers,
reverse_batchsize, grads, losses);
CheckCudaError();
}
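/* Batched softmax cross-entropy pipeline: Max finds the per-row maximum (and the predicted
 * label), Exp writes the shifted exponentials into the losses buffers, Sum produces the
 * normalizers, and SoftMaxLossByExp overwrites those same buffers with the gradients while
 * emitting per-example loss values. */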
std::pair<dtype, std::vector<int>> SoftMaxLoss(const std::vector<const dtype *> &vals_vector,
int count,
int dim,
const std::vector<int> &gold_answers,
int batchsize,
const std::vector<dtype *> &losses_vector) {
IntArray answer_arr, gold_answer_arr;
answer_arr.init(count);
gold_answer_arr.init((int*)gold_answers.data(), count);
NumberArray max_vals, sum_vals;
max_vals.init(count);
sum_vals.init(count);
NumberPointerArray vals, losses;
vals.init((dtype**)vals_vector.data(), count);
losses.init((dtype**)losses_vector.data(), count);
Max(vals.value, count, dim, answer_arr.value, max_vals.value);
Exp(vals.value, count, dim, max_vals.value, losses.value);
Sum(losses.value, count, dim, sum_vals.value);
NumberArray loss_arr;
loss_arr.init(count);
SoftMaxLossByExp(losses.value, count, dim, vals.value, sum_vals.value, max_vals.value,
gold_answer_arr.value, 1.0 / batchsize, losses.value, loss_arr.value);
vector<int> answers(count);
MyCudaMemcpy(answers.data(), answer_arr.value, count * sizeof(int), cudaMemcpyDeviceToHost);
for (int word_id : answers) {
if (word_id < 0) {
for (int id : answers) {
cerr << id << " ";
}
cerr << endl;
abort();
}
}
vector<dtype> loss_vector(count);
MyCudaMemcpy(loss_vector.data(), loss_arr.value, count * sizeof(dtype), cudaMemcpyDeviceToHost);
dtype loss_sum = accumulate(loss_vector.begin(), loss_vector.end(), 0.0f);
return std::make_pair(loss_sum, answers);
}
__global__ void KernelMaxScalarForward(const dtype *const *v, int count, int dim,
dtype *block_maxes,
int *block_max_is,
int *block_counters,
int *max_indexes,
dtype **max_vals) {
__shared__ volatile dtype shared_max[TPB];
__shared__ volatile dtype shared_max_i[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY;
shared_max_i[threadIdx.x] = offset;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_maxes[block_maxes_offset] = shared_max[0];
block_max_is[block_maxes_offset] = shared_max_i[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype max = -INFINITY;
int max_i = 100000;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
if (block_maxes[offset] > max) {
max = block_maxes[offset];
max_i = block_max_is[offset];
}
}
shared_max[threadIdx.x] = max;
shared_max_i[threadIdx.x] = max_i;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) {
shared_max[threadIdx.x] = shared_max[threadIdx.x + i];
shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
max_vals[count_i][0] = shared_max[0];
max_indexes[count_i] = shared_max_i[0];
}
}
}
void MaxScalarForward(const vector<const dtype*> &inputs, int count, int dim,
vector<dtype*> &results,
vector<int> &max_indexes) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_maxes;
block_maxes.init(block_y_count * count);
IntArray block_max_is, block_counters;
block_max_is.init(block_y_count * count);
block_counters.init(count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
result_arr.init((dtype**)results.data(), results.size());
IntArray max_index_arr;
max_index_arr.init(max_indexes.size());
KernelMaxScalarForward<<<block_dim, thread_count>>>(input_arr.value, count, dim,
block_maxes.value, block_max_is.value, block_counters.value, max_index_arr.value,
result_arr.value);
CheckCudaError();
MyCudaMemcpy(max_indexes.data(), max_index_arr.value, count * sizeof(int),
cudaMemcpyDeviceToHost);
}
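/* Backward of the max-scalar op: the incoming scalar loss is routed only to the argmax position
 * recorded during the forward pass. */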
__global__ void KernelMaxScalarBackward(const dtype *const *losses, const int *indexes, int count,
dtype *const *input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count; i += step) {
DeviceAtomicAdd(input_losses[i] + indexes[i], losses[i][0]);
}
}
void MaxScalarBackward(const vector<const dtype *> &losses, const vector<int> &indexes, int count,
const vector<dtype*> &input_losses) {
int block_count = DefaultBlockCount(count);
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
IntArray index_arr;
index_arr.init((int*)indexes.data(), indexes.size());
KernelMaxScalarBackward<<<block_count, TPB>>>(loss_arr.value, index_arr.value, count,
        input_loss_arr.value);
    CheckCudaError();
}
__global__ void KernelVectorSumForward(const dtype *const *v, int count, int dim,
dtype *block_sums,
int *block_counters,
dtype **results) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
results[count_i][0] = shared_sum[0];
}
}
}
void VectorSumForward(const vector<const dtype *> &inputs, int count, int dim,
vector<dtype*> &results) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
result_arr.init((dtype**)results.data(), results.size());
KernelVectorSumForward<<<block_dim, thread_count>>>(input_arr.value, count, dim,
block_sums.value, block_counters.value, result_arr.value);
CheckCudaError();
}
__global__ void KernelVectorSumBackward(const dtype *const *losses, int count, int dim,
dtype * *const input_losses) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
DeviceAtomicAdd(input_losses[count_i] + dim_i, losses[count_i][0]);
}
}
void VectorSumBackward(const vector<const dtype*> &losses, int count, int dim,
vector<dtype*> &input_losses) {
int block_count = DefaultBlockCount(count * dim);
NumberPointerArray loss_arr, input_loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
KernelVectorSumBackward<<<block_count, TPB>>>(loss_arr.value, count, dim,
input_loss_arr.value);
CheckCudaError();
}
__global__ void KernelScalarToVectorForward(const dtype* const* inputs, int count, int dim,
dtype *const *results) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count * dim; i += step) {
int count_i = i / dim;
int dim_i = i % dim;
results[count_i][dim_i] = inputs[count_i][0];
}
}
void ScalarToVectorForward(const vector<const dtype*> &inputs, int count, int dim,
vector<dtype*> &results) {
int block_count = DefaultBlockCount(dim * count);
NumberPointerArray input_arr;
input_arr.init((dtype**)inputs.data(), inputs.size());
NumberPointerArray result_arr;
    result_arr.init((dtype**)results.data(), results.size());
KernelScalarToVectorForward<<<block_count, TPB>>>(input_arr.value, count, dim,
result_arr.value);
CheckCudaError();
}
__global__ void KernelScalarToVectorBackward(const dtype *const *losses, int count, int dim,
dtype *block_sums,
int *block_counters,
dtype *const *input_losses) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
if (threadIdx.x == 0 && blockIdx.y == 0) {
block_counters[blockIdx.x] = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
int count_i = blockIdx.x;
int offset = blockIdx.y * blockDim.x + threadIdx.x;
shared_sum[threadIdx.x] = offset < dim ? losses[count_i][offset] : 0.0f;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
if (threadIdx.x == 0) {
block_sums[block_sums_offset] = shared_sum[0];
if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
int offset = blockIdx.x * gridDim.y + i;
sum += block_sums[offset];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
DeviceAtomicAdd(input_losses[count_i], shared_sum[0]);
}
}
}
void ScalarToVectorBackward(const vector<const dtype*> &losses, int count, int dim,
vector<dtype*> &input_losses) {
int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB);
int block_y_count = (dim - 1 + thread_count) / thread_count;
dim3 block_dim(count, block_y_count, 1);
NumberArray block_sums;
block_sums.init(block_y_count * count);
IntArray block_counters;
block_counters.init(count);
NumberPointerArray loss_arr;
loss_arr.init((dtype**)losses.data(), losses.size());
NumberPointerArray input_loss_arr;
input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
KernelScalarToVectorBackward<<<block_dim, thread_count>>>(loss_arr.value, count, dim,
block_sums.value, block_counters.value, input_loss_arr.value);
CheckCudaError();
}
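/* Grid-wide sum of squares (used e.g. for gradient norms): each block writes a partial sum to
 * global_sum and increments an atomic counter; the last block to finish reduces the partials
 * into *result. */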
__global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum,
int *block_counter, dtype *result) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
int index = DeviceDefaultIndex();
if (index == 0) {
*block_counter = 0;
}
if (threadIdx.x == 0) {
is_last_block = false;
}
shared_sum[threadIdx.x] = 0.0f;
for (int i = index; i < len; i += blockDim.x * gridDim.x) {
shared_sum[threadIdx.x] += v[i] * v[i];
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = shared_sum[0];
if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
dtype sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sum += global_sum[i];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = shared_sum[0];
}
}
}
dtype SquareSum(const dtype *v, int len) {
int block_count = DefaultBlockCount(len);
NumberArray global_sum;
global_sum.init(block_count);
DeviceInt block_counter;
block_counter.init();
DeviceNumber result;
result.init();
KernelSquareSum<<<block_count, TPB>>>(v, len,
global_sum.value, block_counter.value, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
__global__ void KernelSquareSum(const dtype *v, const bool *indexers,
int count,
int dim,
dtype *global_sum,
int *block_counter,
dtype *result) {
__shared__ volatile dtype shared_sum[TPB];
__shared__ volatile bool is_last_block;
int index = DeviceDefaultIndex();
if (index == 0) {
*block_counter = 0;
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = 0.0f;
is_last_block = false;
}
int count_i = index / dim;
if (index < count * dim && indexers[count_i]) {
shared_sum[threadIdx.x] = v[index] * v[index];
} else {
shared_sum[threadIdx.x] = 0.0f;
}
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
global_sum[blockIdx.x] = shared_sum[0];
if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
is_last_block = true;
}
}
__syncthreads();
if (is_last_block) {
float sum = 0.0f;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sum += global_sum[i];
}
shared_sum[threadIdx.x] = sum;
__syncthreads();
for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
if (threadIdx.x < i) {
shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = shared_sum[0];
}
}
}
dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) {
int block_count = DefaultBlockCountWithoutLimit(count * dim);
NumberArray global_sum;
global_sum.init(block_count);
DeviceInt block_counter;
block_counter.init();
DeviceNumber result;
result.init();
KernelSquareSum<<<block_count, TPB>>>(v, indexers,
count, dim, global_sum.value, block_counter.value, result.value);
CheckCudaError();
result.copyFromDeviceToHost();
return result.v;
}
__global__ void KernelRescale(dtype *v, int len, dtype scale) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < len; i += step) {
v[i] *= scale;
}
}
void Rescale(dtype *v, int len, dtype scale) {
int block_count = DefaultBlockCount(len);
KernelRescale<<<block_count, TPB>>>(v, len, scale);
CheckCudaError();
}
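/* Adam update. The host passes x = 1 / (1 - beta1^(iter+1)); together with
 * sqrt(1 - beta2^(iter+1)) this folds both Adam bias corrections into lr_t. L2 regularization is
 * added to the gradient for all parameters except biases. */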
__global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias,
dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps,
dtype x) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
if (!is_bias) {
grad[i] += val[i] * reg;
}
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x;
dtype square_plus_eps = aux_square[i] + eps;
val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps);
}
}
void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
dtype x = 1.0f / (1 - pow(belta1, iter + 1));
KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, is_bias, aux_mean,
aux_square,
iter,
belta1,
belta2,
alpha,
reg,
eps,
x);
CheckCudaError();
}
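/* AdamW update: unlike the Adam kernel above, weight decay is applied directly to the parameter
 * by scaling it with (1 - reg) (skipped for biases) rather than being added to the gradient. */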
__global__ void KernelUpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias,
dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps,
dtype x) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x;
dtype square_plus_eps = aux_square[i] + eps;
val[i] = (1 - (is_bias? 0.0f : reg)) * val[i] - aux_mean[i] * lr_t /
cuda_sqrt(square_plus_eps);
}
}
void UpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean,
dtype *aux_square,
int iter,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
dtype x = 1.0f / (1 - pow(belta1, iter + 1));
KernelUpdateAdamW<<<block_count, TPB>>>(val, grad, row, col, is_bias, aux_mean,
aux_square,
iter,
belta1,
belta2,
alpha,
reg,
eps,
x);
CheckCudaError();
}
__global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col,
dtype *aux_mean,
dtype *aux_square,
const bool *indexers,
int *iters,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
int count_i = i / row;
if (indexers[count_i]) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i];
aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] *
grad[i];
dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2,
iters[count_i] + 1)) / (1 - cuda_pow(belta1,
iters[count_i] + 1));
dtype square_plus_eps = aux_square[i] + eps;
val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps);
}
}
}
__global__ void KernelSelfPlusIters(const bool *indexers, int *iters,
int count) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
for (int i = index; i < count; i += step) {
if (indexers[i]) {
++iters[i];
}
}
}
void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean,
dtype *aux_square,
const bool *indexers,
int *iters,
dtype belta1,
dtype belta2,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, aux_mean,
aux_square, indexers, iters, belta1, belta2, alpha, reg, eps);
CheckCudaError();
block_count = DefaultBlockCount(col);
KernelSelfPlusIters<<<block_count, TPB>>>(indexers, iters, col);
CheckCudaError();
}
__global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_square[i] = aux_square[i] + grad[i] * grad[i];
val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps);
}
}
void UpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square,
alpha, reg, eps);
CheckCudaError();
}
__global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
const bool *indexers,
dtype alpha,
dtype reg,
dtype eps) {
int index = DeviceDefaultIndex();
int step = DeviceDefaultStep();
int len = row * col;
for (int i = index; i < len; i += step) {
int count_i = i / col;
if (indexers[count_i]) {
if (row > 1 && col > 1) {
grad[i] += val[i] * reg;
}
aux_square[i] = aux_square[i] + grad[i] * grad[i];
val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps);
}
}
}
void UpdateAdagrad(dtype *val, dtype *grad, int row, int col,
dtype *aux_square,
const bool *indexers,
dtype alpha,
dtype reg,
dtype eps) {
int block_count = DefaultBlockCount(row * col);
KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square,
indexers, alpha, reg, eps);
CheckCudaError();
}
void *GraphHostAlloc() {
void *m;
CallCuda(cudaHostAlloc(&m, 10000000, cudaHostAllocWriteCombined));
if (m == NULL) {
abort();
}
return m;
}
}
|
fafda4a598bd5e525c089ace9127acf9d26259d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void add_one_v1(int n, float* x) {
int i = threadIdx.x;
int j = threadIdx.y;
x[i + j * n] += 1;
}
__global__ void add_one_v2(int n, float* x) {
int left_up_of_block_x = blockIdx.x * blockDim.x;
int left_up_of_block_y = blockIdx.y * blockDim.y;
int i = left_up_of_block_x + threadIdx.x;
int j = left_up_of_block_y + threadIdx.y;
if (i < n) {
x[i + j * n] += 1;
}
}
void initialize_input(float* h_A, int n) {
for (int i = 0; i < n * n; i++) {
h_A[i] = i;
}
}
int main(void) {
int N = 16;
size_t size = N * N * sizeof(float);
// Allocate input vectors h_A in host memory
float* h_A = (float*)malloc(size);
initialize_input(h_A, N);
// Allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
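    // Note: this 1-D launch gives threadIdx.y == 0 for every thread, so add_one_v1 only updates
    // the first row of the N x N matrix; add_one_v2 below covers the full matrix with 2-D blocks.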
hipLaunchKernelGGL(( add_one_v1), dim3(1), dim3(N), 0, 0, N, d_A);
// Copy result from device memory to host memory
hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( add_one_v2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, N, d_A);
// Copy result from device memory to host memory
hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
// Free device memory
hipFree(d_A);
// Free host memory
free(h_A);
}
| fafda4a598bd5e525c089ace9127acf9d26259d5.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void add_one_v1(int n, float* x) {
int i = threadIdx.x;
int j = threadIdx.y;
x[i + j * n] += 1;
}
__global__ void add_one_v2(int n, float* x) {
int left_up_of_block_x = blockIdx.x * blockDim.x;
int left_up_of_block_y = blockIdx.y * blockDim.y;
int i = left_up_of_block_x + threadIdx.x;
int j = left_up_of_block_y + threadIdx.y;
if (i < n) {
x[i + j * n] += 1;
}
}
void initialize_input(float* h_A, int n) {
for (int i = 0; i < n * n; i++) {
h_A[i] = i;
}
}
int main(void) {
int N = 16;
size_t size = N * N * sizeof(float);
// Allocate input vectors h_A in host memory
float* h_A = (float*)malloc(size);
initialize_input(h_A, N);
// Allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
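    // Note: this 1-D launch gives threadIdx.y == 0 for every thread, so add_one_v1 only updates
    // the first row of the N x N matrix; add_one_v2 below covers the full matrix with 2-D blocks.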
add_one_v1<<<1, N>>>(N, d_A);
// Copy result from device memory to host memory
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
add_one_v2<<<numBlocks, threadsPerBlock>>>(N, d_A);
// Copy result from device memory to host memory
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
// Free device memory
cudaFree(d_A);
// Free host memory
free(h_A);
}
|
b6b30c5d6c0588478f8169a7791f8c3f4d2c4bf0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* compile: nvcc -o gather -arch=sm_35 -O3 gather.cu -I /usr/local/cuda/samples/common/inc/
*/
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <unistd.h> // for sleep()
#include <ctime>    // for time(), used to seed rand()
using namespace std;
#define MAX_SHUFFLE_TIME (2099999999)
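// Note: despite its name, gather_kernel performs a scatter-style write
// (d_out[d_idx[i]] = d_in[i]); each thread walks the input with a grid-stride loop.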
__global__ void gather_kernel(int *d_in, int *d_out, int *d_idx, int num)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
int globalSIze = blockDim.x * gridDim.x;
while (globalId < num) {
d_out[d_idx[globalId]] = d_in[globalId];
globalId += globalSIze;
}
}
float scatter(int *d_in, int *d_out, int *d_idx, int num)
{
int blockSize = 1024;
int ele_per_thread = 16;
int gridSize = num / blockSize / ele_per_thread;
dim3 grid(gridSize);
dim3 block(blockSize);
float totalTime;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
hipLaunchKernelGGL(( gather_kernel), dim3(grid), dim3(block), 0, 0, d_in, d_out, d_idx, num);
hipEventRecord(end);
hipEventSynchronize(start);
hipEventSynchronize(end);
hipEventElapsedTime(&totalTime, start, end);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
std::cout<<hipGetErrorString(err)<<std::endl;
return totalTime;
}
void test_bandwidth(int len) {
std::cout<<"Data size(Copy): "<<len<<" ("<<len/1024/1024*sizeof(int)<<"MB)"<<'\t';
float aveTime = 0.0;
int *h_in, *d_in, *d_out, *h_idx, *d_idx;
h_in = new int[len];
h_idx = new int[len];
for(int i = 0; i < len; i++) {
h_in[i] = i;
h_idx[i] = i;
}
unsigned shuffleTime = (len * 3 < MAX_SHUFFLE_TIME)? len*3 : MAX_SHUFFLE_TIME;
srand((unsigned)time(NULL));
sleep(1);
/*data shuffling*/
int temp, from = 0, to = 0;
for(int i = 0; i < shuffleTime; i++) {
from = rand() % len;
to = rand() % len;
temp = h_idx[from];
h_idx[from] = h_idx[to];
h_idx[to] = temp;
}
checkCudaErrors(hipMalloc(&d_in,sizeof(int)*len));
checkCudaErrors(hipMalloc(&d_out,sizeof(int)*len));
checkCudaErrors(hipMalloc(&d_idx,sizeof(int)*len));
hipMemcpy(d_in, h_in, sizeof(int)*len, hipMemcpyHostToDevice);
hipMemcpy(d_idx, h_idx, sizeof(int)*len, hipMemcpyHostToDevice);
int experTime = 10;
for(int i = 0; i < experTime; i++) {
float tempTime = scatter(d_in, d_out, d_idx, len);
if (i != 0) aveTime += tempTime;
}
aveTime /= (experTime - 1);
delete[] h_in;
delete[] h_idx;
checkCudaErrors(hipFree(d_in));
checkCudaErrors(hipFree(d_idx));
checkCudaErrors(hipFree(d_out));
std::cout<<"Time:"<<aveTime<<" ms"<<'\t'
<<"Throughput:"<<1.0*len* sizeof(int)/1024/1024/1024/aveTime*1e3<<" GB/s"<<std::endl; //compared with scan
}
int main()
{
/*MB*/
for(int data_size_MB = 128; data_size_MB < 4096; data_size_MB += 256) {
int data_size = data_size_MB/ sizeof(int) * 1024 * 1024;
test_bandwidth(data_size);
}
return 0;
} | b6b30c5d6c0588478f8169a7791f8c3f4d2c4bf0.cu | /*
* compile: nvcc -o gather -arch=sm_35 -O3 gather.cu -I /usr/local/cuda/samples/common/inc/
*/
#include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <unistd.h> // for sleep()
#include <ctime>    // for time(), used to seed rand()
using namespace std;
#define MAX_SHUFFLE_TIME (2099999999)
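// Note: despite its name, gather_kernel performs a scatter-style write
// (d_out[d_idx[i]] = d_in[i]); each thread walks the input with a grid-stride loop.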
__global__ void gather_kernel(int *d_in, int *d_out, int *d_idx, int num)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
int globalSIze = blockDim.x * gridDim.x;
while (globalId < num) {
d_out[d_idx[globalId]] = d_in[globalId];
globalId += globalSIze;
}
}
float scatter(int *d_in, int *d_out, int *d_idx, int num)
{
int blockSize = 1024;
int ele_per_thread = 16;
int gridSize = num / blockSize / ele_per_thread;
dim3 grid(gridSize);
dim3 block(blockSize);
float totalTime;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
gather_kernel<<<grid, block>>>(d_in, d_out, d_idx, num);
cudaEventRecord(end);
cudaEventSynchronize(start);
cudaEventSynchronize(end);
cudaEventElapsedTime(&totalTime, start, end);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
std::cout<<cudaGetErrorString(err)<<std::endl;
return totalTime;
}
void test_bandwidth(int len) {
std::cout<<"Data size(Copy): "<<len<<" ("<<len/1024/1024*sizeof(int)<<"MB)"<<'\t';
float aveTime = 0.0;
int *h_in, *d_in, *d_out, *h_idx, *d_idx;
h_in = new int[len];
h_idx = new int[len];
for(int i = 0; i < len; i++) {
h_in[i] = i;
h_idx[i] = i;
}
unsigned shuffleTime = (len * 3 < MAX_SHUFFLE_TIME)? len*3 : MAX_SHUFFLE_TIME;
srand((unsigned)time(NULL));
sleep(1);
/*data shuffling*/
int temp, from = 0, to = 0;
for(int i = 0; i < shuffleTime; i++) {
from = rand() % len;
to = rand() % len;
temp = h_idx[from];
h_idx[from] = h_idx[to];
h_idx[to] = temp;
}
checkCudaErrors(cudaMalloc(&d_in,sizeof(int)*len));
checkCudaErrors(cudaMalloc(&d_out,sizeof(int)*len));
checkCudaErrors(cudaMalloc(&d_idx,sizeof(int)*len));
cudaMemcpy(d_in, h_in, sizeof(int)*len, cudaMemcpyHostToDevice);
cudaMemcpy(d_idx, h_idx, sizeof(int)*len, cudaMemcpyHostToDevice);
int experTime = 10;
for(int i = 0; i < experTime; i++) {
float tempTime = scatter(d_in, d_out, d_idx, len);
if (i != 0) aveTime += tempTime;
}
aveTime /= (experTime - 1);
delete[] h_in;
delete[] h_idx;
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_idx));
checkCudaErrors(cudaFree(d_out));
std::cout<<"Time:"<<aveTime<<" ms"<<'\t'
<<"Throughput:"<<1.0*len* sizeof(int)/1024/1024/1024/aveTime*1e3<<" GB/s"<<std::endl; //compared with scan
}
int main()
{
/*MB*/
for(int data_size_MB = 128; data_size_MB < 4096; data_size_MB += 256) {
int data_size = data_size_MB/ sizeof(int) * 1024 * 1024;
test_bandwidth(data_size);
}
return 0;
} |
2d1ff41c5f5d523ef59a52a8bab80ad12f0657df.hip | // !!! This is a file automatically generated by hipify!!!
/**
* File: kernel_p_rotate_molecola_2.cu
*
* Author: Lorenzo Casalini
* Date: Summer 2020
* Summary of File:
 * This file contains a parallel implementation of the Match Probe Shape function. The parallel
 * kernel is the function place_in_best_angle: each thread rotates the fragment by the angle
 * corresponding to its thread id and then evaluates the expansion. The difference from
 * kernel_p_place_best_angle.cu is that, instead of using a vector of expansions in unified global
 * memory, we initialize it in shared memory, with the purpose of reducing access time and the
 * overhead of managing unified memory.
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdbool.h>
#include <assert.h>
typedef struct
{
char name[40];
int n_atoms;
int n_bonds;
double atoms[500];
int bonds[300];
}molecola;
typedef struct
{
int head;
int tail;
int elements[500];
}queue;
#define repetitions 10
#define enable_refiniment false
#define high_precision_step 1
#define low_precision_step 30
#define threshold 0.2
inline hipError_t checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
/**
* Populates a struct molecola in memory given the name of the text file
* @param molecola_name
* @param m1 empty struct molecola to be inizialized
*/
void create_molecola(char* molecola_name,molecola* m1) {
FILE *input_file;
int line_index = 0;
char* res;
char line[500];
int number_of_atoms;
int number_of_bounds;
char path[50];
strcpy(path,"molecules/");
strcat(path, molecola_name);
input_file = fopen(path, "r");
if (input_file == NULL) {
printf("fopen non funziona\n");
return;
}
res = fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
char* numero = strtok(line, " ");
number_of_atoms = atoi(numero);
numero = strtok(NULL, " ");
number_of_bounds = atoi(numero);
m1->n_atoms = number_of_atoms;
m1->n_bonds = number_of_bounds;
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
while(1){
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
token = strtok(NULL, " ");
m1->atoms[3*line_index] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index+1] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index + 2] = atof(token);
fgets(line,100,input_file);
if(strncmp(line,"@<TRIPOS>",5)==0){
break;
}
}
fgets(line, 100, input_file);
while (strcmp(line, "@<TRIPOS>SUBSTRUCTURE\n") != 0 && res != NULL && strcmp(res,"\n")!=0) {
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
m1->bonds[2*line_index] = atoi(token);
token = strtok(NULL, " ");
m1->bonds[2*line_index+1] = atoi(token);
res = fgets(line, 100, input_file);
}
fclose(input_file);
strcpy(m1->name,molecola_name);
}
/**
* Checks if a node is present in the queue
* @param node index of a node
* @param queue struct queue
*/
__host__ __device__ bool isPresent(int node, queue* queue){
for (int i = 0; i < queue->tail; i++) {
if (queue->elements[i] == node) {
return true; }
}
return false;
}
/**
* Does a breadth first search on the graph defined by the bonds of the molecola without the one specified by the parameter bond index.
* It populates queue with the atoms found in the search.
 * The queue must already be initialized, having as its first element the first node of the search
* @param bond_index the bond we must eliminate from the search
* @param molecola
* @param queue the queue we populate with the adjacent elements, it must be already inizialized
*/
__host__ __device__ void bfs(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
/**
 * It initializes a queue and then calls a bfs to populate the queue with the nodes adjacent to atom.
 * @param molecola
 * @param queue to be initialized; bfs will put the adjacent nodes in this queue
* @param atom
* @param bond_index
*/
__host__ __device__ void find_adjacent_nodes(molecola* molecola, queue* queue, int atom, int bond_index) {
queue->elements[0] = atom;
queue->head = 0;
queue->tail = 1;
bfs(bond_index, molecola, queue);
}
/**
 * Given a molecola and a bond, it initializes two queues: the first holds the left atom of the bond, the second the right atom. Then it calls a bfs for each queue; if the two queues
 * share any element it is not a rotamer, because the two fragments are connected. If a queue contains only one atom it is not a rotamer.
* @param bond_index the index of the bond to check
* @param molecola
* @return isRotamer
*/
bool isRotamer(int bond_index, molecola* molecola) {
int first_node, second_node;
bool isRotamer;
queue q1;
queue q2;
first_node = molecola->bonds[2*bond_index];
second_node = molecola->bonds[2*bond_index+1];
q1.tail = 1;
q1.head = 0;
q1.elements[0] = first_node;
q2.tail = 1;
q2.head = 0;
q2.elements[0] = second_node;
bfs(bond_index, molecola, &q1);
bfs(bond_index, molecola, &q2);
isRotamer = true;
for (int i = 0; i < q1.tail; i++) {
for (int j = 0; j < q2.tail; j++) {
if (q1.elements[i] == q2.elements[j]){
isRotamer = false;
}
}
}
if (q1.tail == 1 || q2.tail == 1) {
isRotamer = false;
}
return isRotamer;
}
/**
 * Given a molecola it checks every bond to see whether it is a rotamer, then puts the index of each rotamer in a list
 * @param molecola
 * @param number_of_rotamers a pointer to an int where it puts the number of rotamers it has found
 * @return rotamer_list the indices of all the rotamers found
*/
int* find_rotamers(molecola* molecola, int* number_of_rotamers) {
	//always call this with n_rotamers = 0
int size = molecola->n_bonds;
bool* x;
int n_rotamers = 0;
int* rotamer_list;
int rotamer_index = 0;
x = (bool*)malloc(size* sizeof(int));
for (int i = 0; i < size; i++) {
if (isRotamer(i, molecola)) { x[i] = true; }
else { x[i] = false; }
}
for (int i = 0; i < size; i++) {
if (x[i]) {
n_rotamers += 1;
}
}
rotamer_list = (int*)malloc(n_rotamers * sizeof(int));
for (int i = 0; i < size; i++) {
if (x[i]) {
rotamer_list[rotamer_index] = i;
rotamer_index += 1;
}
}
free(x);
*number_of_rotamers = n_rotamers;
return rotamer_list;
}
__host__ __device__ void normalise(double* x, double* y, double* z) {
double w = sqrt(*x * *x + *y * *y + *z * *z);
*x = *x / w;
*y = *y / w;
*z = *z / w;
}
/**
 * Given a rotamer it rotates an atom by a given angle around the axis defined by the rotamer
 * @param molecola
 * @param atom to rotate
 * @param rotamer index of the bond defining the axis around which the atom is rotated
* @param angle
*/
__host__ __device__ void rotate_atom(molecola*molecola, int atom, int rotamer, int angle) {
double px, py, pz, p1x, p1y, p1z, p2x, p2y, p2z, rx, ry, rz, qx, qy, qz;
double tetha = angle*M_PI / 180;
int rotamer1_index, rotamer2_index;
double costheta, sintheta;
px = molecola->atoms[3 * (atom - 1)];
py = molecola->atoms[3 * (atom - 1) + 1];
pz = molecola->atoms[3 * (atom - 1) + 2];
rotamer1_index = molecola->bonds[2 * rotamer];
rotamer2_index = molecola->bonds[2 * rotamer + 1];
p1x = molecola->atoms[3 * (rotamer1_index - 1)];
p1y = molecola->atoms[3 * (rotamer1_index - 1) + 1];
p1z = molecola->atoms[3 * (rotamer1_index - 1) + 2];
p2x = molecola->atoms[3 * (rotamer2_index - 1)];
p2y = molecola->atoms[3 * (rotamer2_index - 1) + 1];
p2z = molecola->atoms[3 * (rotamer2_index - 1) + 2];
rx = p2x - p1x;
ry = p2y - p1y;
rz = p2z - p1z;
px = px - p1x;
py = py - p1y;
pz = pz - p1z;
normalise(&rx, &ry, &rz);
costheta = cos(tetha);
sintheta = sin(tetha);
qx = 0;
qy = 0;
qz = 0;
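	/* q = R(theta, r) * p: apply the axis-angle rotation matrix (Rodrigues' formula)
	   about the normalised bond direction r to the translated point p. */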
qx += (costheta + (1 - costheta)* rx*rx)*px;
qx += ((1 - costheta) * rx * ry - rz * sintheta) * py;
qx += ((1 - costheta) * rx * rz + ry * sintheta) * pz;
qy += ((1 - costheta) * rx * ry + rz * sintheta) * px;
qy += (costheta + (1 - costheta) * ry * ry) * py;
qy += ((1 - costheta) * ry * rz - rx * sintheta) * pz;
qz += ((1 - costheta) * rx * rz - ry * sintheta) * px;
qz += ((1 - costheta) * ry * rz + rx * sintheta) * py;
qz += (costheta + (1 - costheta) * rz * rz) * pz;
qx += p1x;
qy += p1y;
qz += p1z;
molecola->atoms[3 * (atom - 1)] = qx;
molecola->atoms[3 * (atom - 1) + 1] = qy;
molecola->atoms[3 * (atom - 1) + 2] = qz;
}
/**
* Calculates the distance between two atoms of a molecola
* @param index_1 first atom
* @param index_2 second atom
* @return distance
*/
__host__ __device__ double distance(molecola* molecola, int index_1, int index_2) {
double distance;
double x1, y1, z1, x2, y2, z2;
x1 = molecola->atoms[3 * (index_1)];
y1 = molecola->atoms[3 * (index_1) + 1];
z1 = molecola->atoms[3 * (index_1) + 2];
x2 = molecola->atoms[3 * (index_2)];
y2 = molecola->atoms[3 * (index_2) + 1];
z2 = molecola->atoms[3 * (index_2) + 2];
distance = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
return distance;
}
/**
 * Given a molecola, calculates the sum of the pairwise distances between all its atoms
* @param molecola
* @return expansion sum of the distances of the atoms in molecola
*/
__host__ __device__ double measure_expansion(molecola* molecola) {
double expansion = 0;
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
expansion += distance(molecola, i, j);
}
}
}
return expansion;
}
/**
 * Given a rotamer and the index of an atom belonging to that rotamer, first finds the atoms connected to it and then rotates them
 * @param molecola
 * @param rotamer the index of the bond around which we want to rotate the fragment
* @param atom the index of the atom, belonging to the rotamer, that indicates which part of the molecola we must rotate
* @param angle angle of rotation
*/
__host__ __device__ void rotate_molecola(molecola* molecola, int rotamer, int atom, int angle) {
queue q1;
find_adjacent_nodes(molecola, &q1, atom, rotamer);
for (int i = 0; i < q1.tail; i++) {
rotate_atom(molecola, q1.elements[i], rotamer, angle);
}
}
/**
* Calculates if a ligand is feasible, a ligand is feasible if all its atoms' distances are > 0.8 A
* @param molecola
*/
__host__ __device__ bool is_ligand_feasible(molecola* molecola) {
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
if (distance(molecola, i, j) < 0.8) {
return false;
}
}
}
}
return true;
}
/**
* Calculates the size of the fragment defined by the rotamer index
* @param molecola
* @param bond the index of the rotamer
* @param index is 1 if we evaluate left fragment, 2 right fragment
* @return size_pct the size of the fragment w.r.t. the size of molecola
*/
double fragment_size(molecola* molecola, int bond, int index) {
//index 1 if left fragment , 2 right
queue q1;
q1.tail = 1;
q1.head = 0;
double size_pct;
if (index == 1) {
q1.elements[0] = molecola->bonds[2 * bond];
}
else if (index == 2) {
q1.elements[0] = molecola->bonds[2 * bond + 1];
}
else {
printf("Fragment size: Index must be between 1 and 2");
return 0;
}
bfs(bond, molecola, &q1);
size_pct = (double)q1.tail / molecola->n_atoms;
return size_pct;
}
/**
 * This kernel creates a molecola in the local memory of each thread and initializes it as the molecola passed
 * as argument, then creates an array of expansions in shared memory and initializes it to 0. After that
 * each thread rotates the fragment by the angle corresponding to its thread_id and writes the corresponding
 * expansion in the vector of expansions. At the end one thread selects the best expansion and the corresponding
 * angle and rotates the original molecule by the angle found before.
* @param mol
* @param bond index of the bond around which we rotate the fragment
* @param atom index of the atom, is needed to define which part of the molecola we must rotate
* @param step step of the rotations
* @param min_range of rotations
* @param max_range of rotations
*/
__global__ void place_in_best_angle(molecola* mol, int bond, int atom, int step, int min_range, int max_range){
__shared__ double expansions[360];
int thread_id =blockIdx.x*blockDim.x + threadIdx.x;
double best_expansion=0;
	double best_angle = 0; // default to no rotation if no feasible expansion is found
molecola mol2;
mol2 = *mol;
if(thread_id < 360){
expansions[thread_id] = 0;
}
if((thread_id == min_range || thread_id % step == 0) && thread_id < max_range){
rotate_molecola(&mol2, bond, atom, thread_id);
if(is_ligand_feasible(&mol2)){
expansions[thread_id] = measure_expansion(&mol2);
}
}
	__syncthreads(); // make every thread's expansion write visible before thread 0 scans them
	if(thread_id == 0){
for(int i = 0; i<360; i++){
if(expansions[i] > best_expansion){
best_expansion = expansions[i];
best_angle = i;
}
}
rotate_molecola(mol, bond, atom, best_angle);
}
}
/**
 * Places the fragment in the middle of each tile, then evaluates the expansion of the molecola with the fragment placed in that tile;
 * at the end returns the index of the tile with the best expansion
* @param mol
* @param n_tiles number of tile to evaluate
* @param bond index of the bond around which we rotate the fragment
* @param atom index of the atom, is needed to define which part of the molecola we must rotate
* @return best_expansion_tile index of the tile with best expansion
*/
int find_best_tile(molecola* mol, int n_tiles, int bond, int atom) {
molecola mol2;
mol2 = *mol;
int tile_size;
double expansion;
double best_expansion=0;
int best_expansion_tile=0;
tile_size = floor(360 / n_tiles);
rotate_molecola(&mol2, bond, atom, tile_size / 2);
if (is_ligand_feasible(&mol2)) {
best_expansion = measure_expansion(&mol2);
best_expansion_tile = 0;
}
for (int i = 1; i < n_tiles; i++) {
rotate_molecola(&mol2, bond, atom, tile_size);
if (is_ligand_feasible(&mol2)) {
expansion = measure_expansion(&mol2);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_tile = i;
}
}
}
return best_expansion_tile;
}
/**
 * Given a molecola, this function finds the rotamers and for each rotamer rotates first the left fragment and then the right one, in a way that depends on the fragment size
* @param molecola a struct molecola
*/
double match_probe_shape(molecola* molecola) {
int* rotamer_list;
int rotamer_index = 0;
int n_rotamers = 0;
int best_tile;
int n_tiles = 18;
int tile_size = 360/n_tiles;
	//not sure this is correct
rotamer_list = find_rotamers(molecola, &n_rotamers);
for (int j = 0; j < repetitions; j++) {
for (int i = 0; i < n_rotamers; i++) {
rotamer_index = rotamer_list[i];
if (fragment_size(molecola, rotamer_index, 1) < threshold) {
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index], low_precision_step, 0, 360);
checkCuda(hipDeviceSynchronize());
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index]);
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
checkCuda(hipDeviceSynchronize());
}
else {
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, 0, 360);
checkCuda(hipDeviceSynchronize());
}
}
if (fragment_size(molecola, rotamer_index, 2) < threshold) {
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], low_precision_step, 0, 360);
checkCuda(hipDeviceSynchronize());
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index + 1]);
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
checkCuda(hipDeviceSynchronize());
}
else {
hipLaunchKernelGGL(( place_in_best_angle), dim3(1),dim3(360), 0, 0, molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, 0, 360);
checkCuda(hipDeviceSynchronize());
}
}
}
}
free(rotamer_list);
return measure_expansion(molecola);
}
int main() {
clock_t begin = clock();
int n_molecole = 48;
double espansion;
int deviceId;
hipGetDevice(&deviceId);
molecola* m1;
//molecola list_of_molecole[1];
char* molecole_list[] = {"Aspirin.mol2","Diclofenac.mol2","Diplosalsalate.mol2","Flurbiprofen.mol2","Focalin.mol2","Losmiprofen.mol2","Melatonin.mol2","Myfortic.mol2","Nifuradene.mol2","Oxybenzone.mol2","Propiomazine.mol2","Raloxifene.mol2","Relacatib.mol2", "Ribasphere.mol2","Roxoperone.mol2","Sulindac.mol2",
"1b9v_deposited_1.mol2", "1br6_deposited_1.mol2","1bxq_ligand.mol2", "1c1b_deposited_1.mol2","1ctr_deposited_1.mol2","1cvu_deposited_1.mol2","1cx2_deposited_1.mol2",
"1ezq_deposited_1.mol2", "1fcx_deposited_1.mol2", "1fl3_deposited_1.mol2", "1fm6_deposited_1.mol2","1fm9_deposited_1.mol2","1fmz_ligand.mol2","1fq5_deposited_1.mol2",
"1gvx_ligand.mol2", "1gwx_deposited_1.mol2","1h23_ligand.mol2", "1hp0_deposited_1.mol2","1hvy_deposited_1.mol2", "1iiq_ligand.mol2","1lpz_deposited_1.mol2",
"1mq6_deposited_1.mol2","1oyt_deposited_1.mol2", "1pso_deposited_1.mol2","1s19_deposited_1.mol2","1uml_deposited_1.mol2","1ydt_deposited_1.mol2","2hnx_ligand.mol2",
"3l3n_ligand.mol2", "3nhi_ligand.mol2","4djp_ligand.mol2","4gid_ligand.mol2"};
for (int i = 0; i < n_molecole; i++) {
checkCuda(hipMallocManaged(&m1, sizeof(molecola)));
create_molecola(molecole_list[i],m1);
espansion = measure_expansion(m1);
printf("Before expansion Molecola: %s , espansion: %f\n", m1->name,espansion);
espansion = match_probe_shape(m1);
printf("Molecola: %s, expansion: %f\n", m1->name, espansion);
checkCuda(hipFree(m1));
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nTime spent: %f\n", time_spent);
return 0;
} | 2d1ff41c5f5d523ef59a52a8bab80ad12f0657df.cu | /**
* File: kernel_p_rotate_molecola_2.cu
*
* Author: Lorenzo Casalini
* Date: Summer 2020
* Summary of File:
 *	This file contains a parallel implementation of the function Match Probe Shape. The parallel kernel
 *	is the function place_in_best_angle: each thread rotates the fragment by the angle corresponding to its
 *	thread id and then evaluates the expansion. The difference with file kernel_p_place_best_angle.cu is that
 *	now, instead of using a vector of expansions in unified global memory, we initialize it in shared
 *	memory, with the purpose of reducing the access time and the time spent managing unified memory.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdbool.h>
#include <assert.h>
typedef struct
{
char name[40];
int n_atoms;
int n_bonds;
double atoms[500];
int bonds[300];
}molecola;
typedef struct
{
int head;
int tail;
int elements[500];
}queue;
#define repetitions 10
#define enable_refiniment false
#define high_precision_step 1
#define low_precision_step 30
#define threshold 0.2
inline cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
/**
* Populates a struct molecola in memory given the name of the text file
* @param molecola_name
* @param m1 empty struct molecola to be inizialized
*/
void create_molecola(char* molecola_name,molecola* m1) {
FILE *input_file;
int line_index = 0;
char* res;
char line[500];
int number_of_atoms;
int number_of_bounds;
char path[50];
strcpy(path,"molecules/");
strcat(path, molecola_name);
input_file = fopen(path, "r");
if (input_file == NULL) {
printf("fopen non funziona\n");
return;
}
res = fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
char* numero = strtok(line, " ");
number_of_atoms = atoi(numero);
numero = strtok(NULL, " ");
number_of_bounds = atoi(numero);
m1->n_atoms = number_of_atoms;
m1->n_bonds = number_of_bounds;
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
while(1){
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
token = strtok(NULL, " ");
m1->atoms[3*line_index] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index+1] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index + 2] = atof(token);
fgets(line,100,input_file);
if(strncmp(line,"@<TRIPOS>",5)==0){
break;
}
}
fgets(line, 100, input_file);
while (strcmp(line, "@<TRIPOS>SUBSTRUCTURE\n") != 0 && res != NULL && strcmp(res,"\n")!=0) {
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
m1->bonds[2*line_index] = atoi(token);
token = strtok(NULL, " ");
m1->bonds[2*line_index+1] = atoi(token);
res = fgets(line, 100, input_file);
}
fclose(input_file);
strcpy(m1->name,molecola_name);
}
/**
* Checks if a node is present in the queue
* @param node index of a node
* @param queue struct queue
*/
__host__ __device__ bool isPresent(int node, queue* queue){
for (int i = 0; i < queue->tail; i++) {
if (queue->elements[i] == node) {
return true; }
}
return false;
}
/**
* Does a breadth first search on the graph defined by the bonds of the molecola without the one specified by the parameter bond index.
* It populates queue with the atoms found in the search.
 * The queue must already be initialized, having as its first element the first node of the search
* @param bond_index the bond we must eliminate from the search
* @param molecola
* @param queue the queue we populate with the adjacent elements, it must be already inizialized
*/
__host__ __device__ void bfs(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
/**
 * It initializes a queue and then calls a bfs to populate the queue with the nodes adjacent to atom.
 * @param molecola
 * @param queue to be initialized; bfs will put the adjacent nodes in this queue
* @param atom
* @param bond_index
*/
__host__ __device__ void find_adjacent_nodes(molecola* molecola, queue* queue, int atom, int bond_index) {
queue->elements[0] = atom;
queue->head = 0;
queue->tail = 1;
bfs(bond_index, molecola, queue);
}
/**
 * Given a molecola and a bond, it initializes two queues: the first holds the left atom of the bond, the second the right atom. Then it calls a bfs for each queue; if the two queues
 * share any element it is not a rotamer, because the two fragments are connected. If a queue contains only one atom it is not a rotamer.
* @param bond_index the index of the bond to check
* @param molecola
* @return isRotamer
*/
bool isRotamer(int bond_index, molecola* molecola) {
int first_node, second_node;
bool isRotamer;
queue q1;
queue q2;
first_node = molecola->bonds[2*bond_index];
second_node = molecola->bonds[2*bond_index+1];
q1.tail = 1;
q1.head = 0;
q1.elements[0] = first_node;
q2.tail = 1;
q2.head = 0;
q2.elements[0] = second_node;
bfs(bond_index, molecola, &q1);
bfs(bond_index, molecola, &q2);
isRotamer = true;
for (int i = 0; i < q1.tail; i++) {
for (int j = 0; j < q2.tail; j++) {
if (q1.elements[i] == q2.elements[j]){
isRotamer = false;
}
}
}
if (q1.tail == 1 || q2.tail == 1) {
isRotamer = false;
}
return isRotamer;
}
/**
 * Given a molecola it checks every bond to see whether it is a rotamer, then puts the index of each rotamer in a list
 * @param molecola
 * @param number_of_rotamers a pointer to an int where it puts the number of rotamers it has found
 * @return rotamer_list the indices of all the rotamers found
*/
int* find_rotamers(molecola* molecola, int* number_of_rotamers) {
	//always call this with n_rotamers = 0
int size = molecola->n_bonds;
bool* x;
int n_rotamers = 0;
int* rotamer_list;
int rotamer_index = 0;
x = (bool*)malloc(size* sizeof(int));
for (int i = 0; i < size; i++) {
if (isRotamer(i, molecola)) { x[i] = true; }
else { x[i] = false; }
}
for (int i = 0; i < size; i++) {
if (x[i]) {
n_rotamers += 1;
}
}
rotamer_list = (int*)malloc(n_rotamers * sizeof(int));
for (int i = 0; i < size; i++) {
if (x[i]) {
rotamer_list[rotamer_index] = i;
rotamer_index += 1;
}
}
free(x);
*number_of_rotamers = n_rotamers;
return rotamer_list;
}
__host__ __device__ void normalise(double* x, double* y, double* z) {
double w = sqrt(*x * *x + *y * *y + *z * *z);
*x = *x / w;
*y = *y / w;
*z = *z / w;
}
/**
 * Given a rotamer it rotates an atom by a given angle around the axis defined by the rotamer
 * @param molecola
 * @param atom to rotate
 * @param rotamer index of the bond defining the axis around which the atom is rotated
* @param angle
*/
__host__ __device__ void rotate_atom(molecola*molecola, int atom, int rotamer, int angle) {
double px, py, pz, p1x, p1y, p1z, p2x, p2y, p2z, rx, ry, rz, qx, qy, qz;
double tetha = angle*M_PI / 180;
int rotamer1_index, rotamer2_index;
double costheta, sintheta;
px = molecola->atoms[3 * (atom - 1)];
py = molecola->atoms[3 * (atom - 1) + 1];
pz = molecola->atoms[3 * (atom - 1) + 2];
rotamer1_index = molecola->bonds[2 * rotamer];
rotamer2_index = molecola->bonds[2 * rotamer + 1];
p1x = molecola->atoms[3 * (rotamer1_index - 1)];
p1y = molecola->atoms[3 * (rotamer1_index - 1) + 1];
p1z = molecola->atoms[3 * (rotamer1_index - 1) + 2];
p2x = molecola->atoms[3 * (rotamer2_index - 1)];
p2y = molecola->atoms[3 * (rotamer2_index - 1) + 1];
p2z = molecola->atoms[3 * (rotamer2_index - 1) + 2];
rx = p2x - p1x;
ry = p2y - p1y;
rz = p2z - p1z;
px = px - p1x;
py = py - p1y;
pz = pz - p1z;
normalise(&rx, &ry, &rz);
costheta = cos(tetha);
sintheta = sin(tetha);
qx = 0;
qy = 0;
qz = 0;
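	/* q = R(theta, r) * p: apply the axis-angle rotation matrix (Rodrigues' formula)
	   about the normalised bond direction r to the translated point p. */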
qx += (costheta + (1 - costheta)* rx*rx)*px;
qx += ((1 - costheta) * rx * ry - rz * sintheta) * py;
qx += ((1 - costheta) * rx * rz + ry * sintheta) * pz;
qy += ((1 - costheta) * rx * ry + rz * sintheta) * px;
qy += (costheta + (1 - costheta) * ry * ry) * py;
qy += ((1 - costheta) * ry * rz - rx * sintheta) * pz;
qz += ((1 - costheta) * rx * rz - ry * sintheta) * px;
qz += ((1 - costheta) * ry * rz + rx * sintheta) * py;
qz += (costheta + (1 - costheta) * rz * rz) * pz;
qx += p1x;
qy += p1y;
qz += p1z;
molecola->atoms[3 * (atom - 1)] = qx;
molecola->atoms[3 * (atom - 1) + 1] = qy;
molecola->atoms[3 * (atom - 1) + 2] = qz;
}
/**
* Calculates the distance between two atoms of a molecola
* @param index_1 first atom
* @param index_2 second atom
* @return distance
*/
__host__ __device__ double distance(molecola* molecola, int index_1, int index_2) {
double distance;
double x1, y1, z1, x2, y2, z2;
x1 = molecola->atoms[3 * (index_1)];
y1 = molecola->atoms[3 * (index_1) + 1];
z1 = molecola->atoms[3 * (index_1) + 2];
x2 = molecola->atoms[3 * (index_2)];
y2 = molecola->atoms[3 * (index_2) + 1];
z2 = molecola->atoms[3 * (index_2) + 2];
distance = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
return distance;
}
/**
 * Given a molecola, calculates the sum of the pairwise distances between all its atoms
* @param molecola
* @return expansion sum of the distances of the atoms in molecola
*/
__host__ __device__ double measure_expansion(molecola* molecola) {
double expansion = 0;
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
expansion += distance(molecola, i, j);
}
}
}
return expansion;
}
/**
 * Given a rotamer and the index of an atom belonging to that rotamer, first finds the atoms connected to it and then rotates them
 * @param molecola
 * @param rotamer the index of the bond around which we want to rotate the fragment
* @param atom the index of the atom, belonging to the rotamer, that indicates which part of the molecola we must rotate
* @param angle angle of rotation
*/
__host__ __device__ void rotate_molecola(molecola* molecola, int rotamer, int atom, int angle) {
queue q1;
find_adjacent_nodes(molecola, &q1, atom, rotamer);
for (int i = 0; i < q1.tail; i++) {
rotate_atom(molecola, q1.elements[i], rotamer, angle);
}
}
/**
* Calculates if a ligand is feasible, a ligand is feasible if all its atoms' distances are > 0.8 A
* @param molecola
*/
__host__ __device__ bool is_ligand_feasible(molecola* molecola) {
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
if (distance(molecola, i, j) < 0.8) {
return false;
}
}
}
}
return true;
}
/**
* Calculates the size of the fragment defined by the rotamer index
* @param molecola
* @param bond the index of the rotamer
* @param index is 1 if we evaluate left fragment, 2 right fragment
* @return size_pct the size of the fragment w.r.t. the size of molecola
*/
double fragment_size(molecola* molecola, int bond, int index) {
//index 1 if left fragment , 2 right
queue q1;
q1.tail = 1;
q1.head = 0;
double size_pct;
if (index == 1) {
q1.elements[0] = molecola->bonds[2 * bond];
}
else if (index == 2) {
q1.elements[0] = molecola->bonds[2 * bond + 1];
}
else {
printf("Fragment size: Index must be between 1 and 2");
return 0;
}
bfs(bond, molecola, &q1);
size_pct = (double)q1.tail / molecola->n_atoms;
return size_pct;
}
/**
 * This kernel creates a molecola in the local memory of each thread and initializes it as the molecola passed
 * as argument, then creates an array of expansions in shared memory and initializes it to 0. After that
 * each thread rotates the fragment by the angle corresponding to its thread_id and writes the corresponding
 * expansion in the vector of expansions. At the end one thread selects the best expansion and the corresponding
 * angle and rotates the original molecule by the angle found before.
* @param mol
* @param bond index of the bond around which we rotate the fragment
* @param atom index of the atom, is needed to define which part of the molecola we must rotate
* @param step step of the rotations
* @param min_range of rotations
* @param max_range of rotations
*/
__global__ void place_in_best_angle(molecola* mol, int bond, int atom, int step, int min_range, int max_range){
__shared__ double expansions[360];
int thread_id =blockIdx.x*blockDim.x + threadIdx.x;
double best_expansion=0;
	double best_angle = 0; // default to no rotation if no feasible expansion is found
molecola mol2;
mol2 = *mol;
if(thread_id < 360){
expansions[thread_id] = 0;
}
if((thread_id == min_range || thread_id % step == 0) && thread_id < max_range){
rotate_molecola(&mol2, bond, atom, thread_id);
if(is_ligand_feasible(&mol2)){
expansions[thread_id] = measure_expansion(&mol2);
}
}
	__syncthreads(); // make every thread's expansion write visible before thread 0 scans them
	if(thread_id == 0){
for(int i = 0; i<360; i++){
if(expansions[i] > best_expansion){
best_expansion = expansions[i];
best_angle = i;
}
}
rotate_molecola(mol, bond, atom, best_angle);
}
}
/**
 * Places the fragment in the middle of each tile, then evaluates the expansion of the molecola with the fragment placed in that tile;
 * at the end returns the index of the tile with the best expansion
* @param mol
* @param n_tiles number of tile to evaluate
* @param bond index of the bond around which we rotate the fragment
* @param atom index of the atom, is needed to define which part of the molecola we must rotate
* @return best_expansion_tile index of the tile with best expansion
*/
int find_best_tile(molecola* mol, int n_tiles, int bond, int atom) {
molecola mol2;
mol2 = *mol;
int tile_size;
double expansion;
double best_expansion=0;
int best_expansion_tile=0;
tile_size = floor(360 / n_tiles);
rotate_molecola(&mol2, bond, atom, tile_size / 2);
if (is_ligand_feasible(&mol2)) {
best_expansion = measure_expansion(&mol2);
best_expansion_tile = 0;
}
for (int i = 1; i < n_tiles; i++) {
rotate_molecola(&mol2, bond, atom, tile_size);
if (is_ligand_feasible(&mol2)) {
expansion = measure_expansion(&mol2);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_tile = i;
}
}
}
return best_expansion_tile;
}
/**
 * Given a molecola, this function finds the rotamers and for each rotamer rotates first the left fragment and then the right one, in a way that depends on the fragment size
* @param molecola a struct molecola
*/
double match_probe_shape(molecola* molecola) {
int* rotamer_list;
int rotamer_index = 0;
int n_rotamers = 0;
int best_tile;
int n_tiles = 18;
int tile_size = 360/n_tiles;
	//not sure this is correct
rotamer_list = find_rotamers(molecola, &n_rotamers);
for (int j = 0; j < repetitions; j++) {
for (int i = 0; i < n_rotamers; i++) {
rotamer_index = rotamer_list[i];
if (fragment_size(molecola, rotamer_index, 1) < threshold) {
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], low_precision_step, 0, 360);
checkCuda(cudaDeviceSynchronize());
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index]);
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
checkCuda(cudaDeviceSynchronize());
}
else {
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, 0, 360);
checkCuda(cudaDeviceSynchronize());
}
}
if (fragment_size(molecola, rotamer_index, 2) < threshold) {
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], low_precision_step, 0, 360);
checkCuda(cudaDeviceSynchronize());
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index + 1]);
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
checkCuda(cudaDeviceSynchronize());
}
else {
place_in_best_angle<<<1,360>>>(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, 0, 360);
checkCuda(cudaDeviceSynchronize());
}
}
}
}
free(rotamer_list);
return measure_expansion(molecola);
}
int main() {
clock_t begin = clock();
int n_molecole = 48;
double espansion;
int deviceId;
cudaGetDevice(&deviceId);
molecola* m1;
//molecola list_of_molecole[1];
char* molecole_list[] = {"Aspirin.mol2","Diclofenac.mol2","Diplosalsalate.mol2","Flurbiprofen.mol2","Focalin.mol2","Losmiprofen.mol2","Melatonin.mol2","Myfortic.mol2","Nifuradene.mol2","Oxybenzone.mol2","Propiomazine.mol2","Raloxifene.mol2","Relacatib.mol2", "Ribasphere.mol2","Roxoperone.mol2","Sulindac.mol2",
"1b9v_deposited_1.mol2", "1br6_deposited_1.mol2","1bxq_ligand.mol2", "1c1b_deposited_1.mol2","1ctr_deposited_1.mol2","1cvu_deposited_1.mol2","1cx2_deposited_1.mol2",
"1ezq_deposited_1.mol2", "1fcx_deposited_1.mol2", "1fl3_deposited_1.mol2", "1fm6_deposited_1.mol2","1fm9_deposited_1.mol2","1fmz_ligand.mol2","1fq5_deposited_1.mol2",
"1gvx_ligand.mol2", "1gwx_deposited_1.mol2","1h23_ligand.mol2", "1hp0_deposited_1.mol2","1hvy_deposited_1.mol2", "1iiq_ligand.mol2","1lpz_deposited_1.mol2",
"1mq6_deposited_1.mol2","1oyt_deposited_1.mol2", "1pso_deposited_1.mol2","1s19_deposited_1.mol2","1uml_deposited_1.mol2","1ydt_deposited_1.mol2","2hnx_ligand.mol2",
"3l3n_ligand.mol2", "3nhi_ligand.mol2","4djp_ligand.mol2","4gid_ligand.mol2"};
for (int i = 0; i < n_molecole; i++) {
checkCuda(cudaMallocManaged(&m1, sizeof(molecola)));
create_molecola(molecole_list[i],m1);
espansion = measure_expansion(m1);
printf("Before expansion Molecola: %s , espansion: %f\n", m1->name,espansion);
espansion = match_probe_shape(m1);
printf("Molecola: %s, expansion: %f\n", m1->name, espansion);
checkCuda(cudaFree(m1));
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nTime spent: %f\n", time_spent);
return 0;
} |
6b6e264dfd5d4c59193ac9c20bfbf1c08ec6740d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "RMSPropOptimizer.hpp"
// template class RMSPropOptimizer<int>;
template class RMSPropOptimizer<float>;
// template class RMSPropOptimizer<double>;
//////////////////////////////////////////////////////////////////////////////// for private method
/*!
@brief Kernel function that updates the parameter values
@details Invoked from UpdateParameterOnGPU
@details Operates on blocks and threads laid out in one dimension
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient gradient of the parameter to update.
@param weightDim dimension of the parameter to update.
@param signed_learning_rate learning rate of the Optimizer
@param decay weighting between the running MeanSquared value and the squared gradient
@param epsilon keeps the denominator from becoming zero
@param weightDecayRate penalty applied when the weight parameters grow large
@param pMeanSquared pMeanSquared buffer to update
@see int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared)
*/
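/* Update rule implemented below (standard RMSProp):
     ms <- decay*ms + (1 - decay)*g^2
     w  <- w + lr*weightDecayRate*w + lr*g/sqrt(ms + eps)
   where lr is signed_learning_rate and already carries the optimisation direction. */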
__global__ void RMSPropUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pMeanSquared[idx] = (decay * pMeanSquared[idx]) + ((1.F - decay) * (g * g)); //meansquared
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt(pMeanSquared[idx] + epsilon) * g;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief Kernel function that updates the parameter values
@details Invoked from UpdateParameterOnGPU
@details Operates on blocks and threads laid out in one dimension
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient gradient of the parameter to update.
@param weightDim dimension of the parameter to update.
@param signed_learning_rate learning rate of the Optimizer
@param decay weighting between the running MeanSquared/pMeanGrad values and the squared gradient
@param epsilon keeps the denominator from becoming zero
@param weightDecayRate penalty applied when the weight parameters grow large
@param pMeanSquared pMeanSquared buffer to update
@param pMeanGrad pMeanGrad buffer to update
@see int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared)
*/
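/* Centered variant implemented below: additionally tracks the running mean of the gradient
   and divides by sqrt(ms - mg^2 + eps), i.e. an estimate of the gradient variance. */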
__global__ void RMSPropUpdate_kernelForCentered(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared, float *pMeanGrad) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pMeanGrad[idx] = (decay * pMeanGrad[idx]) + ((1.f - decay) * g); //meangrad
pMeanSquared[idx] = (decay * pMeanSquared[idx]) + ((1.F - decay) * (g * g)); //meansquared
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt((pMeanSquared[idx] - (pMeanGrad[idx] * pMeanGrad[idx])) + epsilon) * g;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief UpdateParameterOnGPU of RMSPropOptimizer for the centered == false case.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad and m_pDevMeanSquared are the GPU data used by the kernel; each is obtained from the corresponding CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches RMSPropUpdate_kernel, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void RMSPropUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared)
@param *pParameter pointer to the Operator holding the Tensor to update
@param pMeanSquared pMeanSquared buffer to update
@return TRUE on success
*/
template<typename DTYPE> int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevMeanSquared = pMeanSquared->GetGPUData();
RMSPropUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_decay, m_epsilon, weightDecayRate, m_pDevMeanSquared);
return TRUE;
}
/*!
@brief UpdateParameterOnGPU of RMSPropOptimizer for the centered == true case.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad, m_pDevMeanSquared and m_pDevMeanGrad are the GPU data used by the kernel; each is obtained from the corresponding CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches RMSPropUpdate_kernelForCentered, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void RMSPropUpdate_kernelForCentered(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared, float *pMeanGrad)
@param pMeanSquared pMeanSquared buffer to update
@param pMeanGrad pMeanGrad buffer to update
@return TRUE on success
*/
template<typename DTYPE> int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared, Tensor<DTYPE> *pMeanGrad) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevMeanSquared = pMeanSquared->GetGPUData();
DTYPE *m_pDevMeanGrad = pMeanGrad->GetGPUData();
RMSPropUpdate_kernelForCentered << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_decay, m_epsilon, weightDecayRate, m_pDevMeanSquared, m_pDevMeanGrad);
return TRUE;
}
#endif // ifdef __CUDNN__
| 6b6e264dfd5d4c59193ac9c20bfbf1c08ec6740d.cu | #ifdef __CUDNN__
#include "RMSPropOptimizer.hpp"
// template class RMSPropOptimizer<int>;
template class RMSPropOptimizer<float>;
// template class RMSPropOptimizer<double>;
//////////////////////////////////////////////////////////////////////////////// for private method
/*!
@brief Kernel function that updates the parameter values
@details Invoked from UpdateParameterOnGPU
@details Operates on blocks and threads laid out in one dimension
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient gradient of the parameter to update.
@param weightDim dimension of the parameter to update.
@param signed_learning_rate learning rate of the Optimizer
@param decay weighting between the running MeanSquared value and the squared gradient
@param epsilon keeps the denominator from becoming zero
@param weightDecayRate penalty applied when the weight parameters grow large
@param pMeanSquared pMeanSquared buffer to update
@see int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared)
*/
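/* Update rule implemented below (standard RMSProp):
     ms <- decay*ms + (1 - decay)*g^2
     w  <- w + lr*weightDecayRate*w + lr*g/sqrt(ms + eps)
   where lr is signed_learning_rate and already carries the optimisation direction. */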
__global__ void RMSPropUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pMeanSquared[idx] = (decay * pMeanSquared[idx]) + ((1.F - decay) * (g * g)); //meansquared
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt(pMeanSquared[idx] + epsilon) * g;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief Kernel function that updates the parameter values
@details Invoked from UpdateParameterOnGPU
@details Operates on blocks and threads laid out in one dimension
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient gradient of the parameter to update.
@param weightDim dimension of the parameter to update.
@param signed_learning_rate learning rate of the Optimizer
@param decay weighting between the running MeanSquared/pMeanGrad values and the squared gradient
@param epsilon keeps the denominator from becoming zero
@param weightDecayRate penalty applied when the weight parameters grow large
@param pMeanSquared pMeanSquared buffer to update
@param pMeanGrad pMeanGrad buffer to update
@see int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared)
*/
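/* Centered variant implemented below: additionally tracks the running mean of the gradient
   and divides by sqrt(ms - mg^2 + eps), i.e. an estimate of the gradient variance. */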
__global__ void RMSPropUpdate_kernelForCentered(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared, float *pMeanGrad) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pMeanGrad[idx] = (decay * pMeanGrad[idx]) + ((1.f - decay) * g); //meangrad
pMeanSquared[idx] = (decay * pMeanSquared[idx]) + ((1.F - decay) * (g * g)); //meansquared
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt((pMeanSquared[idx] - (pMeanGrad[idx] * pMeanGrad[idx])) + epsilon) * g;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief UpdateParameterOnGPU of RMSPropOptimizer for the centered == false case.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad and m_pDevMeanSquared are the GPU data used by the kernel; each is obtained from the corresponding CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches RMSPropUpdate_kernel, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void RMSPropUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared)
@param *pParameter pointer to the Operator holding the Tensor to update
@param pMeanSquared pMeanSquared buffer to update
@return TRUE on success
*/
template<typename DTYPE> int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevMeanSquared = pMeanSquared->GetGPUData();
RMSPropUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_decay, m_epsilon, weightDecayRate, m_pDevMeanSquared);
return TRUE;
}
/*!
@brief UpdateParameterOnGPU of RMSPropOptimizer for the centered == true case.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad, m_pDevMeanSquared and m_pDevMeanGrad are the GPU data used by the kernel; each is obtained from the corresponding CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches RMSPropUpdate_kernelForCentered, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void RMSPropUpdate_kernelForCentered(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float decay, float epsilon, float weightDecayRate, float *pMeanSquared, float *pMeanGrad)
@param pMeanSquared pMeanSquared buffer to update
@param pMeanGrad pMeanGrad buffer to update
@return TRUE on success
*/
template<typename DTYPE> int RMSPropOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pMeanSquared, Tensor<DTYPE> *pMeanGrad) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevMeanSquared = pMeanSquared->GetGPUData();
DTYPE *m_pDevMeanGrad = pMeanGrad->GetGPUData();
RMSPropUpdate_kernelForCentered << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_decay, m_epsilon, weightDecayRate, m_pDevMeanSquared, m_pDevMeanGrad);
return TRUE;
}
#endif // ifdef __CUDNN__
|
cedfd94ec09c7e98f86f80870e9eacd9c37d3ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const double* __restrict__ arrIn0_0, const double* __restrict__ arrIn1_0, const double* __restrict__ arrIn2_0, const double* __restrict__ arrIn3_0, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 v0 = 0;
const double v1 = arrIn3_0[v0];
const double v17 = fmin(1.0e-2, ({ const Int64 v2 = 0; const double v3 = arrIn0_0[v2]; const Int64 v4 = 0; const double v5 = arrIn2_0[v4]; const Int64 v6 = 0; const double v7 = arrIn1_0[v6]; const Word8 v8 = v7 < 1.0e20; double lv90; if (v8) { lv90 = 0.5 * v7; } else { lv90 = 1.0e20; } const Word8 v10 = v5 < lv90; double lv110; if (v10) { lv110 = 0.6666666666666666 * v5; } else { lv110 = lv90; } const double v12 = lv110 / v3; const Word8 v13 = v12 >= 1.0 && v12 < 1.1; double lv160; if (v13) { lv160 = v3; } else { const Word8 v14 = v12 >= 1.0 && v12 > 1.2; double lv150; if (v14) { lv150 = 1.2 * v3; } else { lv150 = lv110; } lv160 = lv150; } ; lv160; }));
const double v21 = fmin(v17, ({ const double v18 = 1.0e-2 - v1; const Word8 v19 = v18 > v17 && v18 < 4.0 * v17 / 3.0; double lv200; if (v19) { lv200 = 0.6666666666666666 * v17; } else { lv200 = v18; } ; lv200; }));
arrOut_1[ix] = v1 + v21;
arrOut_0[ix] = v21;
}
}
| cedfd94ec09c7e98f86f80870e9eacd9c37d3ffb.cu | #include <accelerate_cuda.h>
extern "C" __global__ void generate(const double* __restrict__ arrIn0_0, const double* __restrict__ arrIn1_0, const double* __restrict__ arrIn2_0, const double* __restrict__ arrIn3_0, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 v0 = 0;
const double v1 = arrIn3_0[v0];
const double v17 = fmin(1.0e-2, ({ const Int64 v2 = 0; const double v3 = arrIn0_0[v2]; const Int64 v4 = 0; const double v5 = arrIn2_0[v4]; const Int64 v6 = 0; const double v7 = arrIn1_0[v6]; const Word8 v8 = v7 < 1.0e20; double lv90; if (v8) { lv90 = 0.5 * v7; } else { lv90 = 1.0e20; } const Word8 v10 = v5 < lv90; double lv110; if (v10) { lv110 = 0.6666666666666666 * v5; } else { lv110 = lv90; } const double v12 = lv110 / v3; const Word8 v13 = v12 >= 1.0 && v12 < 1.1; double lv160; if (v13) { lv160 = v3; } else { const Word8 v14 = v12 >= 1.0 && v12 > 1.2; double lv150; if (v14) { lv150 = 1.2 * v3; } else { lv150 = lv110; } lv160 = lv150; } ; lv160; }));
const double v21 = fmin(v17, ({ const double v18 = 1.0e-2 - v1; const Word8 v19 = v18 > v17 && v18 < 4.0 * v17 / 3.0; double lv200; if (v19) { lv200 = 0.6666666666666666 * v17; } else { lv200 = v18; } ; lv200; }));
arrOut_1[ix] = v1 + v21;
arrOut_0[ix] = v21;
}
}
|
e5b535df8ec37341bcc63e0fa7dbfaccee3dc0c2.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdio.h>
#include <stdlib.h>
//#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <string>
#include <iostream>
#include "common_hip.cuh"
#include "InteractionList.cuh"
#include "kernels.hip"
void writexyz(FILE* traj, float4* r, int Naa);
void writexyz(FILE** traj, float4* r, int Naa, int ntraj);
void writexyz(FILE* traj, float4* r, float3 t,int Naa);
void writeforces(FILE* traj, float4* r, int Naa);
void readcoord(FILE* ind, float4* r, int N);
void readcoord(FILE* ind, float4* r, int N, int ntraj);
void readxyz(FILE* ind, float4* r, int N, int nskipframes);
void readxyz(FILE* ind, float4* r, int N, int ntraj, int nskipframes);
void readextforce(FILE* ind, float4* f, int N, int ntraj);
int main(int argc, char *argv[]){
if (argc<2) {
std::string progname=argv[0];
printf("Usage: %s inputfilename [startpositionsfilename nskipframes]\n",progname.c_str());
exit(1);
}
std::string filename=argv[1];
FILE *ind;
if((ind = fopen(filename.c_str(), "r"))==NULL) {
printf("Cannot open file %s \n",filename.c_str()) ;
exit(1) ;
}
std::string initlfilename;
int nskipframes=0;
if (argc>2)
initlfilename=argv[2];
if (argc>3)
nskipframes=strtol(argv[3], NULL, 10);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
hipSetDevice(0);
hipDeviceReset();
////////// READING INPUT FILE
char comments[80];
fscanf(ind,"%s %e",comments,&NumSteps);
printf("%s %e\n",comments,NumSteps);
fscanf(ind,"%s %f",comments,&h);
printf("%s %f\n",comments,h);
fscanf(ind,"%s %f",comments,&zeta);
printf("%s %f\n",comments,zeta);
fscanf(ind,"%s %f",comments,&kT);
printf("%s %f\n",comments,kT);
fscanf(ind,"%s %d",comments,&neighfreq);
printf("%s %d\n",comments,neighfreq);
fscanf(ind,"%s %d",comments,&outputfreq);
printf("%s %d\n",comments,outputfreq);
fscanf(ind,"%s %d",comments,&trajfreq);
printf("%s %d\n",comments,trajfreq);
fscanf(ind,"%s %d",comments,&ntraj);
printf("%s %d\n",comments,ntraj);
fscanf(ind,"%s %d",comments,&seed);
printf("%s %d\n",comments,seed);
fscanf(ind,"%s %d",comments,&BLOCK_SIZE);
printf("%s %d\n",comments,BLOCK_SIZE);
fscanf(ind,"%s %d",comments,&extforces);
printf("%s %d\n",comments,extforces);
// Initialize trajectory output files
FILE **traj;
traj=(FILE**)malloc(ntraj*sizeof(FILE*));
for (int itraj=0; itraj<ntraj; itraj++) {
char itrajstr[3];
sprintf(itrajstr, "%d", itraj);
std::string trajfile=filename+"traj"+itrajstr+".xyz";
if((traj[itraj] = fopen(trajfile.c_str(), "w"))==NULL) {
printf("Cannot open file %s \n",trajfile.c_str()) ;
exit(1) ;
}
}
int Naa; //Total number of amino acid residues
fscanf(ind,"%d",&Naa);
printf("Number of amino acid residues: %d\n",Naa);
int N=2*Naa*ntraj; //Number of beads
int Nch; //Number of chains
fscanf(ind,"%d",&Nch);
printf("Number of protein chains: %d\n",Nch);
chainstarts_h[0]=Nch-1;
for (int i=1; i<Nch; i++) {
int cstart;
fscanf(ind,"%d",&cstart);
chainstarts_h[i]=cstart;
printf("%d\n",cstart);
}
hipMemcpyToSymbol(chainstarts_c, &chainstarts_h, 100*sizeof(int), 0, hipMemcpyHostToDevice);
// Read bonds and build map, allocate and copy to device
int Nb; //Number of bonds
fscanf(ind,"%d",&Nb);
InteractionListBond bondlist(ind,N/ntraj,MaxBondsPerAtom,Nb,"covalent bond",ntraj);
// Read native contacts and build map for initial structure, allocate and copy to device
int Nnc; //Number of native contacts (initial)
fscanf(ind,"%d",&Nnc);
InteractionListNC nclist(ind,N/ntraj,MaxNCPerAtom,Nnc,"native contact (starting)",ntraj);
// // Read native contacts and build map for target structure, allocate and copy to device
// int Nnc2; //Number of native contacts (target)
// fscanf(ind,"%d",&Nnc2);
// InteractionListNC nclist2(ind,N/ntraj,MaxNCPerAtom,Nnc2,"native contact (target)",ntraj);
//Read sigmas for non-native and neighboring soft sphere repulsion
printf("Reading sigmas\n");
float *sig_h, *sig_d;
sig_h=(float*)malloc(N*sizeof(float));
hipMalloc((void**)&sig_d,N*sizeof(float));
ss_h.MaxSigma=0.;
for (int i=0; i<N/ntraj; i++) {
if (fscanf(ind,"%f", &sig_h[i])==EOF)
printf("Premature end of file at line %d", i);
for (int itraj=1; itraj<ntraj; itraj++)
sig_h[itraj*N/ntraj+i]=sig_h[i];
if (sig_h[i]>ss_h.MaxSigma)
ss_h.MaxSigma=sig_h[i];
}
hipMemcpy(sig_d, sig_h, N*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, sig_t, sig_d, N*sizeof(float));
    //// Read soft-sphere interaction exclusions (like side-chains of neighboring beads) and build map, allocate and copy to device
// int Nexc; //Number of exclusions
// fscanf(ind,"%d",&Nexc);
// InteractionListBond ssel(ind,N/ntraj,MaxBondsPerAtom,Nexc,"additional soft-sphere exclusion",ntraj);
// Read salt bridges
//Number of salt bridges
int Nsb;
fscanf(ind,"%d",&Nsb);
InteractionListSB SaltBridgeList(ind,N/ntraj,MaxNeighbors,Nsb,"electrostatic interaction",ntraj);
//Read external forces (for stretching experiments)
float4 *fext_h, *fext_d;
if (extforces) {
hipMalloc((void**)&fext_d, N*sizeof(float4));
hipHostMalloc((void**)&fext_h, N*sizeof(float4));
readextforce(ind, fext_h, N/ntraj, ntraj);
hipMemcpy(fext_d, fext_h, N*sizeof(float4), hipMemcpyHostToDevice);
}
//Allocate coordinates arrays on device and host
float4 *r_h,*r_d;
hipHostMalloc((void**)&r_h, N*sizeof(float4));
hipMalloc((void**)&r_d, N*sizeof(float4));
// Read starting coordinates
printf("Reading initial coordinates ");
//READ FROM SEPARATE FILE
if (argc>2) {
printf(" from %s\n",initlfilename.c_str());
FILE *initl;
if((initl = fopen(initlfilename.c_str(), "r"))==NULL) {
printf("Cannot open file %s \n",initlfilename.c_str()) ;
exit(1) ;
}
//readxyz(initl, r_h, N);
readxyz(initl, r_h, N/ntraj, ntraj, nskipframes);
fclose(initl);
} else {
//READ FROM INPUT FILE
printf(" from %s\n",filename.c_str());
//readcoord(ind, r_h, N);
readcoord(ind, r_h, N/ntraj, ntraj);
}
//Copy coordinates to device
hipMemcpy(r_d, r_h, N*sizeof(float4), hipMemcpyHostToDevice);
hipBindTexture(0, r_t, r_d, N*sizeof(float4));
//Allocate forces arrays on device <and host>
float4 *f_d;
hipMalloc((void**)&f_d, N*sizeof(float4));
//float4 *f_h;
//hipHostMalloc((void**)&f_h, N*sizeof(float4));
fclose(ind);
//////////////END READING INPUT FILE//////
//Initialize Brownian Dynamics integrator parameters
bd_h.kT=kT;
bd_h.hoz=h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
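    // Random-displacement amplitude from the fluctuation-dissipation relation: Gamma = sqrt(2*(h/zeta)*kT).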
hipMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, hipMemcpyHostToDevice);
checkCUDAError("Brownian dynamics parameters init");
//Initialize Soft Sphere repulsion force field parameters;
ss_h.Minus6eps=-6.0*ss_h.eps;
ss_h.Rcut2=ss_h.Rcut*ss_h.Rcut;
ss_h.Rcut2Outer=ss_h.RcutOuter*ss_h.RcutOuter;
ss_h.CutOffFactor2inv=1.0f/ss_h.CutOffFactor/ss_h.CutOffFactor;
ss_h.CutOffFactor6inv=ss_h.CutOffFactor2inv*ss_h.CutOffFactor2inv*ss_h.CutOffFactor2inv;
ss_h.CutOffFactor8inv=ss_h.CutOffFactor6inv*ss_h.CutOffFactor2inv;
hipMemcpyToSymbol(ss_c, &ss_h, sizeof(SoftSphere), 0, hipMemcpyHostToDevice);
checkCUDAError("Soft sphere parameters init");
//Initialize FENE parameters
fene_h.R02=fene_h.R0*fene_h.R0;
fene_h.kR0=fene_h.R0*fene_h.k;
hipMemcpyToSymbol(fene_c, &fene_h, sizeof(FENE), 0, hipMemcpyHostToDevice);
checkCUDAError("FENE parameters init");
//Initialize electrostatic parameters
hipMemcpyToSymbol(els_c, &els_h, sizeof(ElStatPar), 0, hipMemcpyHostToDevice);
checkCUDAError("Electrostatic parameters init");
//Neighbor list allocate
InteractionList<int> nl;
nl.N=N;
nl.Nmax=MaxSoftSphere;
nl.AllocateOnDevice("neighbor list");
nl.AllocateOnHost();
//hipBindTexture(0, neibmap_t, nl.map_d, nl.N*nl.Nmax*sizeof(int));
// InteractionList<int> nlo;
// nlo.N=N;
// nlo.Nmax=MaxSoftSphere;
// nlo.AllocateOnDevice("outer neighbor list");
// nlo.AllocateOnHost();
//Simulation
int THREADS=BLOCK_SIZE;
int BLOCKS=N/THREADS+1;
//Allocate and initialize random seeds
hiprandStatePhilox4_32_10_t *RNGStates_d;
hipMalloc( (void **)&RNGStates_d, THREADS*BLOCKS*sizeof(hiprandStatePhilox4_32_10_t) );
checkCUDAError("Brownian dynamics seeds allocation");
hipLaunchKernelGGL(( rand_init), dim3(BLOCKS),dim3(THREADS), 0, 0, seed,RNGStates_d);
checkCUDAError("Random number initializion");
printf("t\tTraj#\tRee(nm)\t\tE_POTENTIAL\tE_SoftSpheres\tE_NatCont\tE_ElStat\tE_FENE\t\t~TEMP\t<v>*neighfreq/DeltaRcut\n");
//float Delta=0.;
int stride=neighfreq;
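// Integration steps are dispatched to the GPU in chunks of `stride` (= neighfreq);
// neighbor-list rebuilds, energy output and trajectory writes happen on the host
// side between chunks.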
for (int t=0;t<NumSteps;t+=stride) {
bool CoordCopiedToHost=false;
// if ((t % (3*neighfreq))==0) {
// hipLaunchKernelGGL(( SoftSphereNeighborList), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nlo,bondlist,N/ntraj);
// checkCUDAError("Outer Neighbor List");
// }
if ((t % neighfreq)==0) {
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl);
hipLaunchKernelGGL(( SoftSphereNeighborList), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nl,N/ntraj);
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl,bondlist,N/ntraj);
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nlo,nl);
checkCUDAError("Neighbor List");
}
// nl.CopyToHost("Neighbor List");
// for (int i=0; i<N; i++) {
//
// int Nneib=nl.count_h[i]; //Number of neighbors of the i-th bead
// printf("%d, %d neibs: ",i,Nneib);
// for (int ineib=0;ineib<Nneib;ineib++) //Loop over neighbors of the i-th bead
// printf("%d ",nl.map_h[ineib*nl.N+i]);
// printf("\n");
// }
if ((t % outputfreq)==0) {
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ekin");
CoordCopiedToHost=true;
float* Ekin;
Ekin=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ekin[itraj]+=r_h[i].w;
}
hipLaunchKernelGGL(( FENEEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,bondlist);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Efene");
float* Efene;
Efene=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Efene[itraj]+=r_h[i].w;
}
hipLaunchKernelGGL(( SoftSphereEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nl,sig_d);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ess");
float* Ess;
Ess=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ess[itraj]+=r_h[i].w;
}
hipLaunchKernelGGL(( NativeSubtractSoftSphereEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist,sig_d);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss");
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ess[itraj]+=r_h[i].w;
}
hipLaunchKernelGGL(( NativeEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat");
float* Enat;
Enat=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Enat[itraj]+=r_h[i].w;
}
//DebyeHuckelEnergy<<<BLOCKS,THREADS>>>(r_d,SaltBridgeList);
//hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
//checkCUDAError("Copy coordinates back for Eel");
float* Eel;
Eel=(float*)calloc(ntraj,sizeof(float));
//for (int itraj=0; itraj<ntraj; itraj++) {
// for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
// Eel[itraj]+=r_h[i].w;
//}
for (int itraj=0; itraj<ntraj; itraj++) {
float Epot=(Efene[itraj]+Ess[itraj]+Enat[itraj]+Eel[itraj])/2.;
float Etot=Epot+Ekin[itraj];
float4 rN=r_h[itraj*N/ntraj+2*Naa-1];
float4 r0=r_h[itraj*N/ntraj+Naa];
float ree=sqrt((rN.x-r0.x)*(rN.x-r0.x)+(rN.y-r0.y)*(rN.y-r0.y)+(rN.z-r0.z)*(rN.z-r0.z))/10.;
printf("%d\t%d\t%f\t%f\t%e\t%e\t%e\t%e\t%f\t%f\n",t,itraj,ree,Epot,Ess[itraj]/2.,Enat[itraj]/2.,Eel[itraj]/2.,Efene[itraj]/2.,Ekin[itraj]*ntraj/(N*6.*bd_h.hoz/503.6),sqrt(Ekin[itraj]*ntraj/N)*neighfreq/(ss_h.Rcut-ss_h.MaxSigma*ss_h.CutOffFactor));
}
}
if ((t % trajfreq)==0) {
if (!CoordCopiedToHost) {
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back");
CoordCopiedToHost=true;
}
writexyz(traj,r_h,Naa,ntraj);
}
for (int tongpu=0; tongpu<stride; tongpu++) {
//if (extforces)
hipLaunchKernelGGL(( force_flush), dim3(BLOCKS),dim3(THREADS), 0, 0, f_d,fext_d,N);
// else
// hipLaunchKernelGGL(( force_flush), dim3(BLOCKS),dim3(THREADS), 0, 0, f_d,N);
checkCUDAError("Force flush");
hipLaunchKernelGGL(( FENEForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,bondlist);
checkCUDAError("FENE");
hipLaunchKernelGGL(( SoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
hipLaunchKernelGGL(( NativeSubtractSoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist,sig_d);
checkCUDAError("Native subtract Soft Sphere");
hipLaunchKernelGGL(( NativeForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist);
checkCUDAError("Native");
//DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
//checkCUDAError("DebyeHuckel");
hipLaunchKernelGGL(( integrate), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
/*if (t>StartingStateEquilSteps+SwitchingSteps) {
Delta=1.;
} else if (t>StartingStateEquilSteps) {
Delta=(float)SwitchingStride/(float)SwitchingSteps*(float)((int)(t-StartingStateEquilSteps)/(int)SwitchingStride);
} else {
Delta=0.;
}
bool CoordCopiedToHost=false;
if ((t % neighfreq)==0) {
hipLaunchKernelGGL(( SoftSphereNeighborList), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nl);
checkCUDAError("Neighbor List");
}
if ((t % outputfreq)==0) {
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ekin");
CoordCopiedToHost=true;
float Ekin=0.; for (int i=0; i<N; i++) Ekin+=r_h[i].w;
hipLaunchKernelGGL(( FENEEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,bondlist);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Efene");
float Efene=0.; for (int i=0; i<N; i++) Efene+=r_h[i].w;
hipLaunchKernelGGL(( SoftSphereEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nl,sig_d);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ess");
float Ess=0.; for (int i=0; i<N; i++) Ess+=r_h[i].w;
if (t<StartingStateEquilSteps+SwitchingSteps) {
hipLaunchKernelGGL(( NativeSubtractSoftSphereEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist,sig_d);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss");
for (int i=0; i<N; i++) Ess+=(1.-Delta)*r_h[i].w;
}
if (t>StartingStateEquilSteps) {
hipLaunchKernelGGL(( NativeSubtractSoftSphereEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist2,sig_d);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss2");
for (int i=0; i<N; i++) Ess+=(Delta)*r_h[i].w;
}
float Enat=0.;
if (t<StartingStateEquilSteps+SwitchingSteps) {
hipLaunchKernelGGL(( NativeEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat");
for (int i=0; i<N; i++) Enat+=(1.-Delta)*r_h[i].w;
}
if (t>StartingStateEquilSteps) {
hipLaunchKernelGGL(( NativeEnergy), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,nclist2);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat2");
for (int i=0; i<N; i++) Enat+=(Delta)*r_h[i].w;
}
DebyeHuckelEnergy<<<BLOCKS,THREADS>>>(r_d,SaltBridgeList);
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Eel");
float Eel=0.; for (int i=0; i<N; i++) Eel+=r_h[i].w;
float Epot=(Efene+Ess+Enat+Eel)/2.;
float Etot=Epot+Ekin;
printf("%d\t%e\t%e\t%e\t%e\t%e\t%e\t%f\t%f\n",t,Etot,Epot,Ess/2.,Enat/2.,Eel/2.,Efene/2.,Ekin/(N*6.*bd_h.hoz/503.6),sqrt(Ekin/N)*neighfreq/(ss_h.Rcut-ss_h.MaxSigma*ss_h.CutOffFactor));
}
if ((t % trajfreq)==0) {
if (!CoordCopiedToHost) {
hipMemcpy(r_h, r_d, N*sizeof(float4), hipMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back");
CoordCopiedToHost=true;
}
float3 com=make_float3(0.,0.,0.);
for (int i=0; i<N; i++) {
com.x+=r_h[i].x;
com.y+=r_h[i].y;
com.z+=r_h[i].z;
}
com.x/=N;com.y/=N;com.z/=N;
//writeforces(traj,f_h,Naa);
//writexyz(traj,r_h,Naa);
writexyz(traj,r_h,Naa,ntraj);
}
if (t==StartingStateEquilSteps) {
bd_h.hoz=.1*h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
hipMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, hipMemcpyHostToDevice);
}
if (t==StartingStateEquilSteps+SwitchingSteps) {
bd_h.hoz=h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
hipMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, hipMemcpyHostToDevice);
}
if (t>StartingStateEquilSteps+SwitchingSteps) {
for (int tongpu=0; tongpu<stride; tongpu++) {
hipLaunchKernelGGL(( force_flush), dim3(BLOCKS),dim3(THREADS), 0, 0, f_d,N);
checkCUDAError("Force flush");
hipLaunchKernelGGL(( FENEForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,bondlist);
checkCUDAError("FENE");
hipLaunchKernelGGL(( SoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
hipLaunchKernelGGL(( NativeSubtractSoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist2,sig_d);
checkCUDAError("Native subtract Soft Sphere 2");
hipLaunchKernelGGL(( NativeForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist2);
checkCUDAError("Native 2");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
hipLaunchKernelGGL(( integrate), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
} else if (t>StartingStateEquilSteps) {
for (int tongpu=0; tongpu<stride; tongpu++) {
hipLaunchKernelGGL(( force_flush), dim3(BLOCKS),dim3(THREADS), 0, 0, f_d,N);
checkCUDAError("Force flush");
hipLaunchKernelGGL(( FENEForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,bondlist);
checkCUDAError("FENE");
hipLaunchKernelGGL(( SoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
hipLaunchKernelGGL(( NativeSubtractSoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist2,sig_d,Delta);
checkCUDAError("Native subtract Soft Sphere 2");
hipLaunchKernelGGL(( NativeForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist2,Delta);
checkCUDAError("Native 2");
hipLaunchKernelGGL(( NativeSubtractSoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist,sig_d,1.-Delta);
checkCUDAError("Native subtract Soft Sphere");
hipLaunchKernelGGL(( NativeForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist,1.-Delta);
checkCUDAError("Native");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
hipLaunchKernelGGL(( integrate), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
} else {
for (int tongpu=0; tongpu<stride; tongpu++) {
hipLaunchKernelGGL(( force_flush), dim3(BLOCKS),dim3(THREADS), 0, 0, f_d,N);
checkCUDAError("Force flush");
hipLaunchKernelGGL(( FENEForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,bondlist);
checkCUDAError("FENE");
hipLaunchKernelGGL(( SoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
hipLaunchKernelGGL(( NativeSubtractSoftSphereForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist,sig_d);
checkCUDAError("Native subtract Soft Sphere");
hipLaunchKernelGGL(( NativeForce), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,nclist);
checkCUDAError("Native");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
hipLaunchKernelGGL(( integrate), dim3(BLOCKS),dim3(THREADS), 0, 0, r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
}*/
}
hipFree(r_d);
hipFree(f_d);
nclist.FreeOnDevice("native contacts");
bondlist.FreeOnDevice("bonds");
SaltBridgeList.FreeOnDevice("salt bridges");
nl.FreeOnDevice("neighbor list");
//nlo.FreeOnDevice("outer neighbor list");
hipDeviceReset();
}
| e5b535df8ec37341bcc63e0fa7dbfaccee3dc0c2.cu | //#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
#include <curand_kernel.h>
#include <string>
#include <iostream>
#include "common.cuh"
#include "InteractionList.cuh"
#include "kernels.cu"
void writexyz(FILE* traj, float4* r, int Naa);
void writexyz(FILE** traj, float4* r, int Naa, int ntraj);
void writexyz(FILE* traj, float4* r, float3 t,int Naa);
void writeforces(FILE* traj, float4* r, int Naa);
void readcoord(FILE* ind, float4* r, int N);
void readcoord(FILE* ind, float4* r, int N, int ntraj);
void readxyz(FILE* ind, float4* r, int N, int nskipframes);
void readxyz(FILE* ind, float4* r, int N, int ntraj, int nskipframes);
void readextforce(FILE* ind, float4* f, int N, int ntraj);
int main(int argc, char *argv[]){
if (argc<2) {
std::string progname=argv[0];
printf("Usage: %s inputfilename [startpositionsfilename nskipframes]\n",progname.c_str());
exit(1);
}
std::string filename=argv[1];
FILE *ind;
if((ind = fopen(filename.c_str(), "r"))==NULL) {
printf("Cannot open file %s \n",filename.c_str()) ;
exit(1) ;
}
std::string initlfilename;
int nskipframes=0;
if (argc>2)
initlfilename=argv[2];
if (argc>3)
nskipframes=strtol(argv[3], NULL, 10);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
cudaSetDevice(0);
cudaDeviceReset();
////////// READING INPUT FILE
char comments[80];
fscanf(ind,"%s %e",comments,&NumSteps);
printf("%s %e\n",comments,NumSteps);
fscanf(ind,"%s %f",comments,&h);
printf("%s %f\n",comments,h);
fscanf(ind,"%s %f",comments,&zeta);
printf("%s %f\n",comments,zeta);
fscanf(ind,"%s %f",comments,&kT);
printf("%s %f\n",comments,kT);
fscanf(ind,"%s %d",comments,&neighfreq);
printf("%s %d\n",comments,neighfreq);
fscanf(ind,"%s %d",comments,&outputfreq);
printf("%s %d\n",comments,outputfreq);
fscanf(ind,"%s %d",comments,&trajfreq);
printf("%s %d\n",comments,trajfreq);
fscanf(ind,"%s %d",comments,&ntraj);
printf("%s %d\n",comments,ntraj);
fscanf(ind,"%s %d",comments,&seed);
printf("%s %d\n",comments,seed);
fscanf(ind,"%s %d",comments,&BLOCK_SIZE);
printf("%s %d\n",comments,BLOCK_SIZE);
fscanf(ind,"%s %d",comments,&extforces);
printf("%s %d\n",comments,extforces);
// Initialize trajectory output files
FILE **traj;
traj=(FILE**)malloc(ntraj*sizeof(FILE*));
for (int itraj=0; itraj<ntraj; itraj++) {
char itrajstr[3];
sprintf(itrajstr, "%d", itraj);
std::string trajfile=filename+"traj"+itrajstr+".xyz";
if((traj[itraj] = fopen(trajfile.c_str(), "w"))==NULL) {
printf("Cannot open file %s \n",trajfile.c_str()) ;
exit(1) ;
}
}
int Naa; //Total number of amino acid residues
fscanf(ind,"%d",&Naa);
printf("Number of amino acid residues: %d\n",Naa);
int N=2*Naa*ntraj; //Number of beads
int Nch; //Number of chains
fscanf(ind,"%d",&Nch);
printf("Number of protein chains: %d\n",Nch);
chainstarts_h[0]=Nch-1;
for (int i=1; i<Nch; i++) {
int cstart;
fscanf(ind,"%d",&cstart);
chainstarts_h[i]=cstart;
printf("%d\n",cstart);
}
cudaMemcpyToSymbol(chainstarts_c, &chainstarts_h, 100*sizeof(int), 0, cudaMemcpyHostToDevice);
// Read bonds and build map, allocate and copy to device
int Nb; //Number of bonds
fscanf(ind,"%d",&Nb);
InteractionListBond bondlist(ind,N/ntraj,MaxBondsPerAtom,Nb,"covalent bond",ntraj);
// Read native contacts and build map for initial structure, allocate and copy to device
int Nnc; //Number of native contacts (initial)
fscanf(ind,"%d",&Nnc);
InteractionListNC nclist(ind,N/ntraj,MaxNCPerAtom,Nnc,"native contact (starting)",ntraj);
// // Read native contacts and build map for target structure, allocate and copy to device
// int Nnc2; //Number of native contacts (target)
// fscanf(ind,"%d",&Nnc2);
// InteractionListNC nclist2(ind,N/ntraj,MaxNCPerAtom,Nnc2,"native contact (target)",ntraj);
//Read sigmas for non-native and neighboring soft sphere repulsion
printf("Reading sigmas\n");
float *sig_h, *sig_d;
sig_h=(float*)malloc(N*sizeof(float));
cudaMalloc((void**)&sig_d,N*sizeof(float));
ss_h.MaxSigma=0.;
for (int i=0; i<N/ntraj; i++) {
if (fscanf(ind,"%f", &sig_h[i])==EOF)
printf("Premature end of file at line %d", i);
for (int itraj=1; itraj<ntraj; itraj++)
sig_h[itraj*N/ntraj+i]=sig_h[i];
if (sig_h[i]>ss_h.MaxSigma)
ss_h.MaxSigma=sig_h[i];
}
cudaMemcpy(sig_d, sig_h, N*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, sig_t, sig_d, N*sizeof(float));
//// Read soft-sphere interaction exclusions (like side-chains of neighboring beads) and build map, allocate and copy to device
// int Nexc; //Number of exclusions
// fscanf(ind,"%d",&Nexc);
// InteractionListBond ssel(ind,N/ntraj,MaxBondsPerAtom,Nexc,"additional soft-sphere exclusion",ntraj);
// Read salt bridges
//Number of salt bridges
int Nsb;
fscanf(ind,"%d",&Nsb);
InteractionListSB SaltBridgeList(ind,N/ntraj,MaxNeighbors,Nsb,"electrostatic interaction",ntraj);
//Read external forces (for stretching experiments)
float4 *fext_h, *fext_d;
if (extforces) {
cudaMalloc((void**)&fext_d, N*sizeof(float4));
cudaMallocHost((void**)&fext_h, N*sizeof(float4));
readextforce(ind, fext_h, N/ntraj, ntraj);
cudaMemcpy(fext_d, fext_h, N*sizeof(float4), cudaMemcpyHostToDevice);
}
//Allocate coordinates arrays on device and host
float4 *r_h,*r_d;
cudaMallocHost((void**)&r_h, N*sizeof(float4));
cudaMalloc((void**)&r_d, N*sizeof(float4));
// Read starting coordinates
printf("Reading initial coordinates ");
//READ FROM SEPARATE FILE
if (argc>2) {
printf(" from %s\n",initlfilename.c_str());
FILE *initl;
if((initl = fopen(initlfilename.c_str(), "r"))==NULL) {
printf("Cannot open file %s \n",initlfilename.c_str()) ;
exit(1) ;
}
//readxyz(initl, r_h, N);
readxyz(initl, r_h, N/ntraj, ntraj, nskipframes);
fclose(initl);
} else {
//READ FROM INPUT FILE
printf(" from %s\n",filename.c_str());
//readcoord(ind, r_h, N);
readcoord(ind, r_h, N/ntraj, ntraj);
}
//Copy coordinates to device
cudaMemcpy(r_d, r_h, N*sizeof(float4), cudaMemcpyHostToDevice);
cudaBindTexture(0, r_t, r_d, N*sizeof(float4));
//Allocate forces arrays on device <and host>
float4 *f_d;
cudaMalloc((void**)&f_d, N*sizeof(float4));
//float4 *f_h;
//cudaMallocHost((void**)&f_h, N*sizeof(float4));
fclose(ind);
//////////////END READING INPUT FILE//////
//Initialize Brownian Dynamics integrator parameters
bd_h.kT=kT;
bd_h.hoz=h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
cudaMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, cudaMemcpyHostToDevice);
checkCUDAError("Brownian dynamics parameters init");
//Initialize Soft Sphere repulsion force field parameters;
ss_h.Minus6eps=-6.0*ss_h.eps;
ss_h.Rcut2=ss_h.Rcut*ss_h.Rcut;
ss_h.Rcut2Outer=ss_h.RcutOuter*ss_h.RcutOuter;
ss_h.CutOffFactor2inv=1.0f/ss_h.CutOffFactor/ss_h.CutOffFactor;
ss_h.CutOffFactor6inv=ss_h.CutOffFactor2inv*ss_h.CutOffFactor2inv*ss_h.CutOffFactor2inv;
ss_h.CutOffFactor8inv=ss_h.CutOffFactor6inv*ss_h.CutOffFactor2inv;
cudaMemcpyToSymbol(ss_c, &ss_h, sizeof(SoftSphere), 0, cudaMemcpyHostToDevice);
checkCUDAError("Soft sphere parameters init");
//Initialize FENE parameters
fene_h.R02=fene_h.R0*fene_h.R0;
fene_h.kR0=fene_h.R0*fene_h.k;
cudaMemcpyToSymbol(fene_c, &fene_h, sizeof(FENE), 0, cudaMemcpyHostToDevice);
checkCUDAError("FENE parameters init");
//Initialize electrostatic parameters
cudaMemcpyToSymbol(els_c, &els_h, sizeof(ElStatPar), 0, cudaMemcpyHostToDevice);
checkCUDAError("Electrostatic parameters init");
//Neighbor list allocate
InteractionList<int> nl;
nl.N=N;
nl.Nmax=MaxSoftSphere;
nl.AllocateOnDevice("neighbor list");
nl.AllocateOnHost();
//cudaBindTexture(0, neibmap_t, nl.map_d, nl.N*nl.Nmax*sizeof(int));
// InteractionList<int> nlo;
// nlo.N=N;
// nlo.Nmax=MaxSoftSphere;
// nlo.AllocateOnDevice("outer neighbor list");
// nlo.AllocateOnHost();
//Simulation
int THREADS=BLOCK_SIZE;
int BLOCKS=N/THREADS+1;
//Allocate and initialize random seeds
curandStatePhilox4_32_10_t *RNGStates_d;
cudaMalloc( (void **)&RNGStates_d, THREADS*BLOCKS*sizeof(curandStatePhilox4_32_10_t) );
checkCUDAError("Brownian dynamics seeds allocation");
rand_init<<<BLOCKS,THREADS>>>(seed,RNGStates_d);
checkCUDAError("Random number initializion");
printf("t\tTraj#\tRee(nm)\t\tE_POTENTIAL\tE_SoftSpheres\tE_NatCont\tE_ElStat\tE_FENE\t\t~TEMP\t<v>*neighfreq/DeltaRcut\n");
//float Delta=0.;
int stride=neighfreq;
for (int t=0;t<NumSteps;t+=stride) {
bool CoordCopiedToHost=false;
// if ((t % (3*neighfreq))==0) {
// SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nlo,bondlist,N/ntraj);
// checkCUDAError("Outer Neighbor List");
// }
if ((t % neighfreq)==0) {
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl);
SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl,N/ntraj);
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl,bondlist,N/ntraj);
//SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nlo,nl);
checkCUDAError("Neighbor List");
}
// nl.CopyToHost("Neighbor List");
// for (int i=0; i<N; i++) {
//
// int Nneib=nl.count_h[i]; //Number of neighbors of the i-th bead
// printf("%d, %d neibs: ",i,Nneib);
// for (int ineib=0;ineib<Nneib;ineib++) //Loop over neighbors of the i-th bead
// printf("%d ",nl.map_h[ineib*nl.N+i]);
// printf("\n");
// }
if ((t % outputfreq)==0) {
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ekin");
CoordCopiedToHost=true;
float* Ekin;
Ekin=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ekin[itraj]+=r_h[i].w;
}
FENEEnergy<<<BLOCKS,THREADS>>>(r_d,bondlist);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Efene");
float* Efene;
Efene=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Efene[itraj]+=r_h[i].w;
}
SoftSphereEnergy<<<BLOCKS,THREADS>>>(r_d,nl,sig_d);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ess");
float* Ess;
Ess=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ess[itraj]+=r_h[i].w;
}
NativeSubtractSoftSphereEnergy<<<BLOCKS,THREADS>>>(r_d,nclist,sig_d);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss");
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Ess[itraj]+=r_h[i].w;
}
NativeEnergy<<<BLOCKS,THREADS>>>(r_d,nclist);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat");
float* Enat;
Enat=(float*)calloc(ntraj,sizeof(float));
for (int itraj=0; itraj<ntraj; itraj++) {
for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
Enat[itraj]+=r_h[i].w;
}
//DebyeHuckelEnergy<<<BLOCKS,THREADS>>>(r_d,SaltBridgeList);
//cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
//checkCUDAError("Copy coordinates back for Eel");
float* Eel;
Eel=(float*)calloc(ntraj,sizeof(float));
//for (int itraj=0; itraj<ntraj; itraj++) {
// for (int i=itraj*N/ntraj; i<(itraj+1)*N/ntraj; i++)
// Eel[itraj]+=r_h[i].w;
//}
for (int itraj=0; itraj<ntraj; itraj++) {
float Epot=(Efene[itraj]+Ess[itraj]+Enat[itraj]+Eel[itraj])/2.;
float Etot=Epot+Ekin[itraj];
float4 rN=r_h[itraj*N/ntraj+2*Naa-1];
float4 r0=r_h[itraj*N/ntraj+Naa];
float ree=sqrt((rN.x-r0.x)*(rN.x-r0.x)+(rN.y-r0.y)*(rN.y-r0.y)+(rN.z-r0.z)*(rN.z-r0.z))/10.;
printf("%d\t%d\t%f\t%f\t%e\t%e\t%e\t%e\t%f\t%f\n",t,itraj,ree,Epot,Ess[itraj]/2.,Enat[itraj]/2.,Eel[itraj]/2.,Efene[itraj]/2.,Ekin[itraj]*ntraj/(N*6.*bd_h.hoz/503.6),sqrt(Ekin[itraj]*ntraj/N)*neighfreq/(ss_h.Rcut-ss_h.MaxSigma*ss_h.CutOffFactor));
}
}
if ((t % trajfreq)==0) {
if (!CoordCopiedToHost) {
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back");
CoordCopiedToHost=true;
}
writexyz(traj,r_h,Naa,ntraj);
}
for (int tongpu=0; tongpu<stride; tongpu++) {
//if (extforces)
force_flush<<<BLOCKS,THREADS>>>(f_d,fext_d,N);
// else
// force_flush<<<BLOCKS,THREADS>>>(f_d,N);
checkCUDAError("Force flush");
FENEForce<<<BLOCKS,THREADS>>>(r_d,f_d,bondlist);
checkCUDAError("FENE");
SoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
NativeSubtractSoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist,sig_d);
checkCUDAError("Native subtract Soft Sphere");
NativeForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist);
checkCUDAError("Native");
//DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
//checkCUDAError("DebyeHuckel");
integrate<<<BLOCKS,THREADS>>>(r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
/*if (t>StartingStateEquilSteps+SwitchingSteps) {
Delta=1.;
} else if (t>StartingStateEquilSteps) {
Delta=(float)SwitchingStride/(float)SwitchingSteps*(float)((int)(t-StartingStateEquilSteps)/(int)SwitchingStride);
} else {
Delta=0.;
}
bool CoordCopiedToHost=false;
if ((t % neighfreq)==0) {
SoftSphereNeighborList<<<BLOCKS,THREADS>>>(r_d,nl);
checkCUDAError("Neighbor List");
}
if ((t % outputfreq)==0) {
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ekin");
CoordCopiedToHost=true;
float Ekin=0.; for (int i=0; i<N; i++) Ekin+=r_h[i].w;
FENEEnergy<<<BLOCKS,THREADS>>>(r_d,bondlist);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Efene");
float Efene=0.; for (int i=0; i<N; i++) Efene+=r_h[i].w;
SoftSphereEnergy<<<BLOCKS,THREADS>>>(r_d,nl,sig_d);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Ess");
float Ess=0.; for (int i=0; i<N; i++) Ess+=r_h[i].w;
if (t<StartingStateEquilSteps+SwitchingSteps) {
NativeSubtractSoftSphereEnergy<<<BLOCKS,THREADS>>>(r_d,nclist,sig_d);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss");
for (int i=0; i<N; i++) Ess+=(1.-Delta)*r_h[i].w;
}
if (t>StartingStateEquilSteps) {
NativeSubtractSoftSphereEnergy<<<BLOCKS,THREADS>>>(r_d,nclist2,sig_d);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enss2");
for (int i=0; i<N; i++) Ess+=(Delta)*r_h[i].w;
}
float Enat=0.;
if (t<StartingStateEquilSteps+SwitchingSteps) {
NativeEnergy<<<BLOCKS,THREADS>>>(r_d,nclist);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat");
for (int i=0; i<N; i++) Enat+=(1.-Delta)*r_h[i].w;
}
if (t>StartingStateEquilSteps) {
NativeEnergy<<<BLOCKS,THREADS>>>(r_d,nclist2);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Enat2");
for (int i=0; i<N; i++) Enat+=(Delta)*r_h[i].w;
}
DebyeHuckelEnergy<<<BLOCKS,THREADS>>>(r_d,SaltBridgeList);
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back for Eel");
float Eel=0.; for (int i=0; i<N; i++) Eel+=r_h[i].w;
float Epot=(Efene+Ess+Enat+Eel)/2.;
float Etot=Epot+Ekin;
printf("%d\t%e\t%e\t%e\t%e\t%e\t%e\t%f\t%f\n",t,Etot,Epot,Ess/2.,Enat/2.,Eel/2.,Efene/2.,Ekin/(N*6.*bd_h.hoz/503.6),sqrt(Ekin/N)*neighfreq/(ss_h.Rcut-ss_h.MaxSigma*ss_h.CutOffFactor));
}
if ((t % trajfreq)==0) {
if (!CoordCopiedToHost) {
cudaMemcpy(r_h, r_d, N*sizeof(float4), cudaMemcpyDeviceToHost);
checkCUDAError("Copy coordinates back");
CoordCopiedToHost=true;
}
float3 com=make_float3(0.,0.,0.);
for (int i=0; i<N; i++) {
com.x+=r_h[i].x;
com.y+=r_h[i].y;
com.z+=r_h[i].z;
}
com.x/=N;com.y/=N;com.z/=N;
//writeforces(traj,f_h,Naa);
//writexyz(traj,r_h,Naa);
writexyz(traj,r_h,Naa,ntraj);
}
if (t==StartingStateEquilSteps) {
bd_h.hoz=.1*h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
cudaMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, cudaMemcpyHostToDevice);
}
if (t==StartingStateEquilSteps+SwitchingSteps) {
bd_h.hoz=h/zeta;
bd_h.Gamma=sqrt(2*(bd_h.hoz)*(bd_h.kT));
cudaMemcpyToSymbol(bd_c, &bd_h, sizeof(BrDynPar), 0, cudaMemcpyHostToDevice);
}
if (t>StartingStateEquilSteps+SwitchingSteps) {
for (int tongpu=0; tongpu<stride; tongpu++) {
force_flush<<<BLOCKS,THREADS>>>(f_d,N);
checkCUDAError("Force flush");
FENEForce<<<BLOCKS,THREADS>>>(r_d,f_d,bondlist);
checkCUDAError("FENE");
SoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
NativeSubtractSoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist2,sig_d);
checkCUDAError("Native subtract Soft Sphere 2");
NativeForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist2);
checkCUDAError("Native 2");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
integrate<<<BLOCKS,THREADS>>>(r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
} else if (t>StartingStateEquilSteps) {
for (int tongpu=0; tongpu<stride; tongpu++) {
force_flush<<<BLOCKS,THREADS>>>(f_d,N);
checkCUDAError("Force flush");
FENEForce<<<BLOCKS,THREADS>>>(r_d,f_d,bondlist);
checkCUDAError("FENE");
SoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
NativeSubtractSoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist2,sig_d,Delta);
checkCUDAError("Native subtract Soft Sphere 2");
NativeForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist2,Delta);
checkCUDAError("Native 2");
NativeSubtractSoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist,sig_d,1.-Delta);
checkCUDAError("Native subtract Soft Sphere");
NativeForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist,1.-Delta);
checkCUDAError("Native");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
integrate<<<BLOCKS,THREADS>>>(r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
} else {
for (int tongpu=0; tongpu<stride; tongpu++) {
force_flush<<<BLOCKS,THREADS>>>(f_d,N);
checkCUDAError("Force flush");
FENEForce<<<BLOCKS,THREADS>>>(r_d,f_d,bondlist);
checkCUDAError("FENE");
SoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nl,sig_d);
checkCUDAError("SoftSphere");
NativeSubtractSoftSphereForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist,sig_d);
checkCUDAError("Native subtract Soft Sphere");
NativeForce<<<BLOCKS,THREADS>>>(r_d,f_d,nclist);
checkCUDAError("Native");
DebyeHuckelForce<<<BLOCKS,THREADS>>>(r_d,f_d,SaltBridgeList);
checkCUDAError("DebyeHuckel");
integrate<<<BLOCKS,THREADS>>>(r_d,f_d,N,RNGStates_d);
checkCUDAError("Integrate");
}
}*/
}
cudaFree(r_d);
cudaFree(f_d);
nclist.FreeOnDevice("native contacts");
bondlist.FreeOnDevice("bonds");
SaltBridgeList.FreeOnDevice("salt bridges");
nl.FreeOnDevice("neighbor list");
//nlo.FreeOnDevice("outer neighbor list");
cudaDeviceReset();
}
|
335e8ec142764d9b97a2f01e007a83442cac24bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zsymmetrize_tiles.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
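// Illustrative example: with NB = 64 and m = 100, the grid has
// ceil(100/64) = 2 block rows; threads of the second block row get
// i = 64..127, and the (i < m) guard in the kernels below disables
// rows 100..127, which fall outside the tile.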
__global__ void
csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = cuConjf(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = cuConjf(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m, magmaFloatComplex *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // at least one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB );
dim3 grid( ntile, (m + NB - 1)/NB );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( csymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
}
else {
hipLaunchKernelGGL(( csymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
}
}
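/*
Example usage (a sketch; M, dA and ldda are assumed to be the caller's matrix
size, device pointer and leading dimension, and nb an assumed tile size that
divides M): to symmetrize the nb-by-nb diagonal blocks of an M-by-M matrix dA
whose lower triangles were just updated,
magma_int_t nb = 128; // assumed block size
magma_int_t ntile = M / nb; // full diagonal tiles only
magmablas_csymmetrize_tiles( MagmaLower, nb, dA, ldda, ntile, nb, nb );
i.e. m = nb and mstride = nstride = nb, which satisfies the requirement that
at least one stride be >= m.
*/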
/**
@see magmablas_csymmetrize_tiles_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles(
magma_uplo_t uplo, magma_int_t m, magmaFloatComplex *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
| 335e8ec142764d9b97a2f01e007a83442cac24bd.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zsymmetrize_tiles.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = cuConjf(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = cuConjf(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m, magmaFloatComplex *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // at least one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB );
dim3 grid( ntile, (m + NB - 1)/NB );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
csymmetrize_tiles_upper<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
}
else {
csymmetrize_tiles_lower<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
}
}
/**
@see magmablas_csymmetrize_tiles_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles(
magma_uplo_t uplo, magma_int_t m, magmaFloatComplex *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
|
ad83a3a511d0eba2d69de4f6a57cc8238cf7438d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
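// _to_csr_int (below) compresses the row indices of a coalesced COO matrix
// with `dim` rows into a CSR row-pointer array of length dim+1. For example
// (zero-based indexing), rowIndices = [0, 0, 1, 3] with dim = 4 and nnz = 4
// gives csr = [0, 2, 3, 3, 4].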
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
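// Computes r_ = beta * t + alpha * (sparse_ @ dense): the coalesced COO row
// indices are converted to CSR row pointers and the product is evaluated with
// the csrmm2 routine below.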
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({sparse_, r_, t, dense}));
AT_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
AT_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
if (nnz > 0) {
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
}
});
r_.copy_(r__);
return r_;
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, sparse_, dense}));
AT_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
AT_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!is_same_tensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (sparse._values().numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
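// (For example, adding two coalesced inputs with k and l stored entries just
// produces an uncoalesced result with k+l entries; duplicate coordinates are
// only summed if the caller later invokes coalesce().)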
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, s_values_.scalar_type(), "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, t_values_.scalar_type(), "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
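  // Copying into a CPU tensor blocks until the kernels above have written resultNnz, so the
  // accessor read below sees the final value.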
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
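// Kernel sketch: one thread per nnz of the (coalesced) input. For input entry i, j is the
// candidate position in the sorted, flattened grad indices (computed via thrust::lower_bound
// on the host); if the indices match, the corresponding grad value row is copied into the
// output, otherwise that gradient row is filled with zeros.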
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
AT_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
AT_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
AT_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone();
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
AT_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone();
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options());
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D);
thrust_ptr input_indices_pos_iter(input_indices_pos.data<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
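      // input_indices_pos[i] now holds the insertion position of input index i within the
      // sorted grad indices; an exact match at that position means grad has a value for
      // this input entry.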
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
AT_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(), grad_input_values, grad.options());
}
}
}} // namespace at::native
| ad83a3a511d0eba2d69de4f6a57cc8238cf7438d.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({sparse_, r_, t, dense}));
AT_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
  AT_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
if (nnz > 0) {
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
}
});
r_.copy_(r__);
return r_;
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, sparse_, dense}));
AT_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
AT_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!is_same_tensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (sparse._values().numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
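  // E.g. if 'self' holds nnz_t entries and 'other' holds nnz_s entries, the result simply
  // carries nnz_t + nnz_s index/value pairs; duplicate indices are only merged if the
  // caller later invokes coalesce() on the result.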
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, s_values_.scalar_type(), "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(cuda::check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
  // save the nnz counts up front because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
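  // Both operands were coalesced above, so neither index set contains duplicates and their
  // intersection can hold at most min(t_nnz, s_nnz) entries.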
int64_t sparse_dim = src.sparse_dim();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, t_values_.scalar_type(), "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
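  // Copying into a CPU tensor blocks until the kernels above have written resultNnz, so the
  // accessor read below sees the final value.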
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
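// Kernel sketch: one thread per nnz of the (coalesced) input. For input entry i, j is the
// candidate position in the sorted, flattened grad indices (computed via thrust::lower_bound
// on the host); if the indices match, the corresponding grad value row is copied into the
// output, otherwise that gradient row is filled with zeros.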
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
AT_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
AT_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
AT_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone();
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
AT_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone();
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options());
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D);
thrust_ptr input_indices_pos_iter(input_indices_pos.data<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
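      // input_indices_pos[i] now holds the insertion position of input index i within the
      // sorted grad indices; an exact match at that position means grad has a value for
      // this input entry.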
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
AT_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(), grad_input_values, grad.options());
}
}
}} // namespace at::native
|
4d56258c9231ad47b0825c4fd5904e5ada24b8ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <chrono>
#include <cmath>
#include <cstring>
#include <hip/hip_runtime.h>
#include <fstream>
#include <iostream>
#include <random>
#include <vector>
// =================
// Helper Functions
// =================
// I/O routines
void save(std::ofstream& fsave, particle_t* parts, int num_parts, double size) {
static bool first = true;
if (first) {
fsave << num_parts << " " << size << std::endl;
first = false;
}
for (int i = 0; i < num_parts; ++i) {
fsave << parts[i].x << " " << parts[i].y << std::endl;
}
fsave << std::endl;
}
// Particle Initialization
void init_particles(particle_t* parts, int num_parts, double size, int part_seed) {
std::random_device rd;
std::mt19937 gen(part_seed ? part_seed : rd());
int sx = (int)ceil(sqrt((double)num_parts));
int sy = (num_parts + sx - 1) / sx;
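    // sx * sy >= num_parts, so each particle can be assigned a distinct cell k of an
    // sx-by-sy grid by the shuffle below.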
std::vector<int> shuffle(num_parts);
for (int i = 0; i < shuffle.size(); ++i) {
shuffle[i] = i;
}
for (int i = 0; i < num_parts; ++i) {
// Make sure particles are not spatially sorted
std::uniform_int_distribution<int> rand_int(0, num_parts - i - 1);
int j = rand_int(gen);
int k = shuffle[j];
shuffle[j] = shuffle[num_parts - i - 1];
// Distribute particles evenly to ensure proper spacing
parts[i].x = size * (1. + (k % sx)) / (1 + sx);
parts[i].y = size * (1. + (k / sx)) / (1 + sy);
// Assign random velocities within a bound
std::uniform_real_distribution<float> rand_real(-1.0, 1.0);
parts[i].vx = rand_real(gen);
parts[i].vy = rand_real(gen);
}
}
// Command Line Option Processing
int find_arg_idx(int argc, char** argv, const char* option) {
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], option) == 0) {
return i;
}
}
return -1;
}
int find_int_arg(int argc, char** argv, const char* option, int default_value) {
int iplace = find_arg_idx(argc, argv, option);
if (iplace >= 0 && iplace < argc - 1) {
return std::stoi(argv[iplace + 1]);
}
return default_value;
}
char* find_string_option(int argc, char** argv, const char* option, char* default_value) {
int iplace = find_arg_idx(argc, argv, option);
if (iplace >= 0 && iplace < argc - 1) {
return argv[iplace + 1];
}
return default_value;
}
// ==============
// Main Function
// ==============
int main(int argc, char** argv) {
// Parse Args
if (find_arg_idx(argc, argv, "-h") >= 0) {
std::cout << "Options:" << std::endl;
std::cout << "-h: see this help" << std::endl;
std::cout << "-n <int>: set number of particles" << std::endl;
std::cout << "-o <filename>: set the output file name" << std::endl;
std::cout << "-s <int>: set particle initialization seed" << std::endl;
return 0;
}
// Open Output File
char* savename = find_string_option(argc, argv, "-o", nullptr);
std::ofstream fsave(savename);
// Initialize Particles
int num_parts = find_int_arg(argc, argv, "-n", 1000);
int part_seed = find_int_arg(argc, argv, "-s", 0);
double size = sqrt(density * num_parts);
particle_t* parts = new particle_t[num_parts];
init_particles(parts, num_parts, size, part_seed);
particle_t* parts_gpu;
hipMalloc((void**)&parts_gpu, num_parts * sizeof(particle_t));
hipMemcpy(parts_gpu, parts, num_parts * sizeof(particle_t), hipMemcpyHostToDevice);
// Algorithm
auto start_time = std::chrono::steady_clock::now();
init_simulation(parts_gpu, num_parts, size);
for (int step = 0; step < nsteps; ++step) {
simulate_one_step(parts_gpu, num_parts, size);
hipDeviceSynchronize();
// Save state if necessary
if (fsave.good() && (step % savefreq) == 0) {
hipMemcpy(parts, parts_gpu, num_parts * sizeof(particle_t), hipMemcpyDeviceToHost);
save(fsave, parts, num_parts, size);
}
}
hipDeviceSynchronize();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end_time - start_time;
double seconds = diff.count();
// Finalize
std::cout << "Simulation Time = " << seconds << " seconds for " << num_parts << " particles.\n";
fsave.close();
hipFree(parts_gpu);
delete[] parts;
}
| 4d56258c9231ad47b0825c4fd5904e5ada24b8ed.cu | #include "common.h"
#include <chrono>
#include <cmath>
#include <cstring>
#include <cuda.h>
#include <fstream>
#include <iostream>
#include <random>
#include <vector>
// =================
// Helper Functions
// =================
// I/O routines
void save(std::ofstream& fsave, particle_t* parts, int num_parts, double size) {
static bool first = true;
if (first) {
fsave << num_parts << " " << size << std::endl;
first = false;
}
for (int i = 0; i < num_parts; ++i) {
fsave << parts[i].x << " " << parts[i].y << std::endl;
}
fsave << std::endl;
}
// Particle Initialization
void init_particles(particle_t* parts, int num_parts, double size, int part_seed) {
std::random_device rd;
std::mt19937 gen(part_seed ? part_seed : rd());
int sx = (int)ceil(sqrt((double)num_parts));
int sy = (num_parts + sx - 1) / sx;
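    // sx * sy >= num_parts, so each particle can be assigned a distinct cell k of an
    // sx-by-sy grid by the shuffle below.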
std::vector<int> shuffle(num_parts);
for (int i = 0; i < shuffle.size(); ++i) {
shuffle[i] = i;
}
for (int i = 0; i < num_parts; ++i) {
// Make sure particles are not spatially sorted
std::uniform_int_distribution<int> rand_int(0, num_parts - i - 1);
int j = rand_int(gen);
int k = shuffle[j];
shuffle[j] = shuffle[num_parts - i - 1];
// Distribute particles evenly to ensure proper spacing
parts[i].x = size * (1. + (k % sx)) / (1 + sx);
parts[i].y = size * (1. + (k / sx)) / (1 + sy);
// Assign random velocities within a bound
std::uniform_real_distribution<float> rand_real(-1.0, 1.0);
parts[i].vx = rand_real(gen);
parts[i].vy = rand_real(gen);
}
}
// Command Line Option Processing
int find_arg_idx(int argc, char** argv, const char* option) {
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], option) == 0) {
return i;
}
}
return -1;
}
int find_int_arg(int argc, char** argv, const char* option, int default_value) {
int iplace = find_arg_idx(argc, argv, option);
if (iplace >= 0 && iplace < argc - 1) {
return std::stoi(argv[iplace + 1]);
}
return default_value;
}
char* find_string_option(int argc, char** argv, const char* option, char* default_value) {
int iplace = find_arg_idx(argc, argv, option);
if (iplace >= 0 && iplace < argc - 1) {
return argv[iplace + 1];
}
return default_value;
}
// ==============
// Main Function
// ==============
int main(int argc, char** argv) {
// Parse Args
if (find_arg_idx(argc, argv, "-h") >= 0) {
std::cout << "Options:" << std::endl;
std::cout << "-h: see this help" << std::endl;
std::cout << "-n <int>: set number of particles" << std::endl;
std::cout << "-o <filename>: set the output file name" << std::endl;
std::cout << "-s <int>: set particle initialization seed" << std::endl;
return 0;
}
// Open Output File
char* savename = find_string_option(argc, argv, "-o", nullptr);
std::ofstream fsave(savename);
// Initialize Particles
int num_parts = find_int_arg(argc, argv, "-n", 1000);
int part_seed = find_int_arg(argc, argv, "-s", 0);
double size = sqrt(density * num_parts);
particle_t* parts = new particle_t[num_parts];
init_particles(parts, num_parts, size, part_seed);
particle_t* parts_gpu;
cudaMalloc((void**)&parts_gpu, num_parts * sizeof(particle_t));
cudaMemcpy(parts_gpu, parts, num_parts * sizeof(particle_t), cudaMemcpyHostToDevice);
// Algorithm
auto start_time = std::chrono::steady_clock::now();
init_simulation(parts_gpu, num_parts, size);
for (int step = 0; step < nsteps; ++step) {
simulate_one_step(parts_gpu, num_parts, size);
cudaDeviceSynchronize();
// Save state if necessary
if (fsave.good() && (step % savefreq) == 0) {
cudaMemcpy(parts, parts_gpu, num_parts * sizeof(particle_t), cudaMemcpyDeviceToHost);
save(fsave, parts, num_parts, size);
}
}
cudaDeviceSynchronize();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end_time - start_time;
double seconds = diff.count();
// Finalize
std::cout << "Simulation Time = " << seconds << " seconds for " << num_parts << " particles.\n";
fsave.close();
cudaFree(parts_gpu);
delete[] parts;
}
|
f6a0de9c120957ec30a03cfad55997202c257d6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FastStitchingPluginKernel.h"
#include "defs.h"
///////////////////////////////////////////////////////////////////////////////
// Common
///////////////////////////////////////////////////////////////////////////////
extern "C"
void
CUDARangeToWorld(float4* duplicate, const hipArray *InputImageArray)
{
// Set input image texture parameters and bind texture to the array. Texture is defined in the kernel
InputImageTexture.addressMode[0] = hipAddressModeClamp;
InputImageTexture.addressMode[1] = hipAddressModeClamp;
InputImageTexture.filterMode = hipFilterModePoint;
InputImageTexture.normalized = false;
cutilSafeCall(hipBindTextureToArray(InputImageTexture, InputImageArray));
// Kernel Invocation
dim3 DimBlock(16, 16);
dim3 DimGrid(DivUp(FRAME_SIZE_X, DimBlock.x), DivUp(FRAME_SIZE_Y, DimBlock.y));
hipLaunchKernelGGL(( CUDARangeToWorldKernel<16,16>), dim3(DimGrid),dim3(DimBlock), 0, 0, duplicate);
// Unbind texture
cutilSafeCall(hipUnbindTexture(InputImageTexture));
CUT_CHECK_ERROR("Kernel execution failed");
}
extern "C"
void CUDATransformPoints(double transformationMatrix[4][4], float4* toBeTransformed, int numPoints, float* distances)
{
//printf("CUDATransformPoints\n");
// allocate memory for transformation matrix (will be stored linearly) and copy it
float tmp[16];
tmp[0] = (float)transformationMatrix[0][0];
tmp[1] = (float)transformationMatrix[0][1];
tmp[2] = (float)transformationMatrix[0][2];
tmp[3] = (float)transformationMatrix[0][3];
tmp[4] = (float)transformationMatrix[1][0];
tmp[5] = (float)transformationMatrix[1][1];
tmp[6] = (float)transformationMatrix[1][2];
tmp[7] = (float)transformationMatrix[1][3];
tmp[8] = (float)transformationMatrix[2][0];
tmp[9] = (float)transformationMatrix[2][1];
tmp[10] = (float)transformationMatrix[2][2];
tmp[11] = (float)transformationMatrix[2][3];
tmp[12] = (float)transformationMatrix[3][0];
tmp[13] = (float)transformationMatrix[3][1];
tmp[14] = (float)transformationMatrix[3][2];
tmp[15] = (float)transformationMatrix[3][3];
CUDA_SAFE_CALL(hipMemcpyToSymbol(dev_transformationMatrix, tmp, 16*sizeof(float), 0));
hipLaunchKernelGGL(( kernelTransformPointsAndComputeDistance), dim3(DivUp(numPoints, CUDA_THREADS_PER_BLOCK)), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, toBeTransformed, distances, numPoints);
CUT_CHECK_ERROR("Kernel execution failed (while transforming points)");
}
extern "C"
void CUDAExtractLandmarks(int numLandmarks, float4* devWCsIn, uchar3* devColorsIn, unsigned int* devIndicesIn, float4* devLandmarksOut, float4* devColorsOut)
{
//printf("CUDAExtractLandmarks\n");
hipLaunchKernelGGL(( kernelExtractLandmarks), dim3(DivUp(numLandmarks, CUDA_THREADS_PER_BLOCK)), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, numLandmarks, devWCsIn, devColorsIn, devIndicesIn, devLandmarksOut, devColorsOut);
hipDeviceSynchronize();
CUT_CHECK_ERROR("Kernel execution failed (while trying to find closest points)");
}
///////////////////////////////////////////////////////////////////////////////
// Random Ball Cover
///////////////////////////////////////////////////////////////////////////////
extern "C"
void initGPURBC(int nrOfReps, RepGPU* repsGPU, float weightRBC)
{
CUDA_SAFE_CALL(hipMemcpyToSymbol(dev_repsGPU, repsGPU, nrOfReps*sizeof(RepGPU), 0));
CUDA_SAFE_CALL(hipMemcpyToSymbol(devWeightRGB, &weightRBC, 1*sizeof(float), 0));
}
extern "C"
void CUDAFindClosestPointsRBC(int nrOfPoints, int nrOfReps, unsigned int* indices, float* distances, float4* targetCoords, float4* targetColors, float4* sourceCoords, float4* sourceColors)
{
// find the closest point for each pixel
hipLaunchKernelGGL(( kernelRBC), dim3(DivUp(nrOfPoints, CUDA_THREADS_PER_BLOCK)), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, nrOfReps, indices, distances, targetCoords, targetColors, sourceCoords, sourceColors);
hipDeviceSynchronize();
CUT_CHECK_ERROR("Kernel execution failed (while trying to find closest points)");
}
extern "C"
void CUDAPointsToReps(int nrOfPoints, int nrOfReps, float4* devTargetCoords, float4* devTargetColors, unsigned int* devRepIndices, unsigned int* devPointToRep)
{
hipLaunchKernelGGL(( kernelPointsToReps), dim3(DivUp(nrOfPoints, CUDA_THREADS_PER_BLOCK)), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, nrOfReps, devTargetCoords, devTargetColors, devRepIndices, devPointToRep);
} | f6a0de9c120957ec30a03cfad55997202c257d6a.cu | #include "FastStitchingPluginKernel.h"
#include "defs.h"
///////////////////////////////////////////////////////////////////////////////
// Common
///////////////////////////////////////////////////////////////////////////////
extern "C"
void
CUDARangeToWorld(float4* duplicate, const cudaArray *InputImageArray)
{
// Set input image texture parameters and bind texture to the array. Texture is defined in the kernel
InputImageTexture.addressMode[0] = cudaAddressModeClamp;
InputImageTexture.addressMode[1] = cudaAddressModeClamp;
InputImageTexture.filterMode = cudaFilterModePoint;
InputImageTexture.normalized = false;
cutilSafeCall(cudaBindTextureToArray(InputImageTexture, InputImageArray));
// Kernel Invocation
dim3 DimBlock(16, 16);
dim3 DimGrid(DivUp(FRAME_SIZE_X, DimBlock.x), DivUp(FRAME_SIZE_Y, DimBlock.y));
CUDARangeToWorldKernel<16,16><<<DimGrid,DimBlock>>>(duplicate);
// Unbind texture
cutilSafeCall(cudaUnbindTexture(InputImageTexture));
CUT_CHECK_ERROR("Kernel execution failed");
}
extern "C"
void CUDATransformPoints(double transformationMatrix[4][4], float4* toBeTransformed, int numPoints, float* distances)
{
//printf("CUDATransformPoints\n");
// allocate memory for transformation matrix (will be stored linearly) and copy it
float tmp[16];
tmp[0] = (float)transformationMatrix[0][0];
tmp[1] = (float)transformationMatrix[0][1];
tmp[2] = (float)transformationMatrix[0][2];
tmp[3] = (float)transformationMatrix[0][3];
tmp[4] = (float)transformationMatrix[1][0];
tmp[5] = (float)transformationMatrix[1][1];
tmp[6] = (float)transformationMatrix[1][2];
tmp[7] = (float)transformationMatrix[1][3];
tmp[8] = (float)transformationMatrix[2][0];
tmp[9] = (float)transformationMatrix[2][1];
tmp[10] = (float)transformationMatrix[2][2];
tmp[11] = (float)transformationMatrix[2][3];
tmp[12] = (float)transformationMatrix[3][0];
tmp[13] = (float)transformationMatrix[3][1];
tmp[14] = (float)transformationMatrix[3][2];
tmp[15] = (float)transformationMatrix[3][3];
CUDA_SAFE_CALL(cudaMemcpyToSymbol(dev_transformationMatrix, tmp, 16*sizeof(float), 0));
kernelTransformPointsAndComputeDistance<<<DivUp(numPoints, CUDA_THREADS_PER_BLOCK), CUDA_THREADS_PER_BLOCK>>>(toBeTransformed, distances, numPoints);
CUT_CHECK_ERROR("Kernel execution failed (while transforming points)");
}
extern "C"
void CUDAExtractLandmarks(int numLandmarks, float4* devWCsIn, uchar3* devColorsIn, unsigned int* devIndicesIn, float4* devLandmarksOut, float4* devColorsOut)
{
//printf("CUDAExtractLandmarks\n");
kernelExtractLandmarks<<<DivUp(numLandmarks, CUDA_THREADS_PER_BLOCK), CUDA_THREADS_PER_BLOCK>>>(numLandmarks, devWCsIn, devColorsIn, devIndicesIn, devLandmarksOut, devColorsOut);
cudaThreadSynchronize();
CUT_CHECK_ERROR("Kernel execution failed (while trying to find closest points)");
}
///////////////////////////////////////////////////////////////////////////////
// Random Ball Cover
///////////////////////////////////////////////////////////////////////////////
extern "C"
void initGPURBC(int nrOfReps, RepGPU* repsGPU, float weightRBC)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(dev_repsGPU, repsGPU, nrOfReps*sizeof(RepGPU), 0));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(devWeightRGB, &weightRBC, 1*sizeof(float), 0));
}
extern "C"
void CUDAFindClosestPointsRBC(int nrOfPoints, int nrOfReps, unsigned int* indices, float* distances, float4* targetCoords, float4* targetColors, float4* sourceCoords, float4* sourceColors)
{
// find the closest point for each pixel
kernelRBC<<<DivUp(nrOfPoints, CUDA_THREADS_PER_BLOCK), CUDA_THREADS_PER_BLOCK>>>(nrOfReps, indices, distances, targetCoords, targetColors, sourceCoords, sourceColors);
cudaThreadSynchronize();
CUT_CHECK_ERROR("Kernel execution failed (while trying to find closest points)");
}
extern "C"
void CUDAPointsToReps(int nrOfPoints, int nrOfReps, float4* devTargetCoords, float4* devTargetColors, unsigned int* devRepIndices, unsigned int* devPointToRep)
{
kernelPointsToReps<<<DivUp(nrOfPoints, CUDA_THREADS_PER_BLOCK), CUDA_THREADS_PER_BLOCK>>>(nrOfReps, devTargetCoords, devTargetColors, devRepIndices, devPointToRep);
} |
b2b3dcc364f39ef1e3604e6482c3245bd8c0077d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_DEVICE void concatKernelVStack(int numArrays, sd::Pointer *data, sd::Pointer *inputShapeInfos, void *vz,
sd::LongType *zShapeInfo) {
/*
  this is a special case for concat: we group a bunch of vectors into a 2D matrix
also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
*/
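  // Each block copies one input vector (array r) into row r of the 2D output; the threads of
  // the block stride over that vector's elements using the shared element-wise strides.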
auto z = static_cast<T *>(vz);
auto inputShapes = (sd::LongType **)inputShapeInfos;
T **input = (T **)data;
__shared__ int inputEWS;
__shared__ int resultEWS;
__shared__ int inputLength;
if (threadIdx.x == 0) {
inputLength = shape::length(inputShapes[0]);
inputEWS = shape::elementWiseStride(inputShapes[0]);
resultEWS = shape::elementWiseStride(zShapeInfo);
}
__syncthreads();
for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
int zOffset = r * inputLength * resultEWS;
T *inputData = (T *)input[r];
for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
z[zOffset + i * resultEWS] = inputData[i * inputEWS];
}
}
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execConcatKernelVStack(int numArrays, sd::Pointer *data, sd::Pointer *inputShapeInfos, void *vz,
sd::LongType *zShapeInfo) {
concatKernelVStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void concatKernelVStackGeneric(dim3 &launchDims, hipStream_t *stream, int numArrays, sd::Pointer *data,
sd::Pointer *inputShapeInfos, void *vz, sd::LongType *zShapeInfo) {
hipLaunchKernelGGL(( execConcatKernelVStack<T>)
, dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, numArrays, data, inputShapeInfos, vz, zShapeInfo);
sd::DebugHelper::checkErrorCode(stream, "concatVStack(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void concatKernelVStackGeneric,
(dim3 & launchDims, hipStream_t *stream, int numArrays, sd::Pointer *data,
sd::Pointer *inputShapeInfos, void *vz, sd::LongType *zShapeInfo),
SD_COMMON_TYPES);
} // namespace sd
| b2b3dcc364f39ef1e3604e6482c3245bd8c0077d.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_DEVICE void concatKernelVStack(int numArrays, sd::Pointer *data, sd::Pointer *inputShapeInfos, void *vz,
sd::LongType *zShapeInfo) {
/*
  this is a special case for concat: we group a bunch of vectors into a 2D matrix
also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
*/
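  // Each block copies one input vector (array r) into row r of the 2D output; the threads of
  // the block stride over that vector's elements using the shared element-wise strides.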
auto z = static_cast<T *>(vz);
auto inputShapes = (sd::LongType **)inputShapeInfos;
T **input = (T **)data;
__shared__ int inputEWS;
__shared__ int resultEWS;
__shared__ int inputLength;
if (threadIdx.x == 0) {
inputLength = shape::length(inputShapes[0]);
inputEWS = shape::elementWiseStride(inputShapes[0]);
resultEWS = shape::elementWiseStride(zShapeInfo);
}
__syncthreads();
for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
int zOffset = r * inputLength * resultEWS;
T *inputData = (T *)input[r];
for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
z[zOffset + i * resultEWS] = inputData[i * inputEWS];
}
}
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execConcatKernelVStack(int numArrays, sd::Pointer *data, sd::Pointer *inputShapeInfos, void *vz,
sd::LongType *zShapeInfo) {
concatKernelVStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void concatKernelVStackGeneric(dim3 &launchDims, cudaStream_t *stream, int numArrays, sd::Pointer *data,
sd::Pointer *inputShapeInfos, void *vz, sd::LongType *zShapeInfo) {
execConcatKernelVStack<T>
<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
sd::DebugHelper::checkErrorCode(stream, "concatVStack(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void concatKernelVStackGeneric,
(dim3 & launchDims, cudaStream_t *stream, int numArrays, sd::Pointer *data,
sd::Pointer *inputShapeInfos, void *vz, sd::LongType *zShapeInfo),
SD_COMMON_TYPES);
} // namespace sd
|
2c1832db2613ee2d01d95a9418ab423ed36b5c50.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/integration/marching_cubes_const.h"
#include "cupoch/integration/uniform_tsdfvolume.h"
#include "cupoch/integration/integrate_functor.h"
#include "cupoch/utility/helper.h"
using namespace cupoch;
using namespace cupoch::integration;
namespace {
__device__ float GetTSDFAt(const Eigen::Vector3f &p,
const geometry::TSDFVoxel *voxels,
float voxel_length,
int resolution) {
Eigen::Vector3i idx;
Eigen::Vector3f p_grid = p / voxel_length - Eigen::Vector3f(0.5, 0.5, 0.5);
for (int i = 0; i < 3; i++) {
idx(i) = (int)::floor(p_grid(i));
}
Eigen::Vector3f r = p_grid - idx.cast<float>();
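    // Trilinear interpolation over the 8 surrounding voxels: r holds the fractional offset of
    // p inside the voxel cell and weights each corner's tsdf_ accordingly.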
float tsdf = 0;
tsdf += (1 - r(0)) * (1 - r(1)) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 0), resolution)].tsdf_;
tsdf += (1 - r(0)) * (1 - r(1)) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 1), resolution)].tsdf_;
tsdf += (1 - r(0)) * r(1) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 0), resolution)].tsdf_;
tsdf += (1 - r(0)) * r(1) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 1), resolution)].tsdf_;
tsdf += r(0) * (1 - r(1)) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 0), resolution)].tsdf_;
tsdf += r(0) * (1 - r(1)) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 1), resolution)].tsdf_;
tsdf += r(0) * r(1) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 0), resolution)].tsdf_;
tsdf += r(0) * r(1) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 1), resolution)].tsdf_;
return tsdf;
}
__device__ Eigen::Vector3f GetNormalAt(const Eigen::Vector3f &p,
const geometry::TSDFVoxel *voxels,
float voxel_length,
int resolution) {
Eigen::Vector3f n;
const double half_gap = 0.99 * voxel_length;
#pragma unroll
for (int i = 0; i < 3; i++) {
Eigen::Vector3f p0 = p;
p0(i) -= half_gap;
Eigen::Vector3f p1 = p;
p1(i) += half_gap;
n(i) = GetTSDFAt(p1, voxels, voxel_length, resolution) -
GetTSDFAt(p0, voxels, voxel_length, resolution);
}
return n.normalized();
}
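// For every interior voxel, inspect its three positive-axis neighbors and,
// wherever the TSDF changes sign, emit the interpolated zero-crossing point
// together with its color and a gradient-based normal; non-surface slots are
// returned as NaN so the caller can filter them out with copy_if.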
struct extract_pointcloud_functor {
extract_pointcloud_functor(const geometry::TSDFVoxel *voxels,
int resolution,
float voxel_length,
const Eigen::Vector3f &origin,
TSDFVolumeColorType color_type)
: voxels_(voxels),
resolution_(resolution),
voxel_length_(voxel_length),
origin_(origin),
half_voxel_length_(0.5 * voxel_length_),
color_type_(color_type){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
const float voxel_length_;
const Eigen::Vector3f origin_;
const float half_voxel_length_;
const TSDFVolumeColorType color_type_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f>
operator()(const size_t idx) {
int res2 = (resolution_ - 2) * (resolution_ - 2);
int x = idx / (3 * res2) + 1;
int yzi = idx % (3 * res2);
int y = yzi / (3 * (resolution_ - 2)) + 1;
int zi = yzi % (3 * (resolution_ - 2));
int z = zi / 3 + 1;
int i = zi % 3;
Eigen::Vector3f point = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3f normal = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3f color = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3i idx0(x, y, z);
float w0 = voxels_[IndexOf(idx0, resolution_)].weight_;
float f0 = voxels_[IndexOf(idx0, resolution_)].tsdf_;
const Eigen::Vector3f &c0 = voxels_[IndexOf(idx0, resolution_)].color_;
if (!(w0 != 0.0f && f0 < 0.98f && f0 >= -0.98f)) {
return thrust::make_tuple(point, normal, color);
}
Eigen::Vector3f p0(half_voxel_length_ + voxel_length_ * x,
half_voxel_length_ + voxel_length_ * y,
half_voxel_length_ + voxel_length_ * z);
Eigen::Vector3f p1 = p0;
p1(i) += voxel_length_;
Eigen::Vector3i idx1 = idx0;
idx1(i) += 1;
if (idx1(i) < resolution_ - 1) {
float w1 = voxels_[IndexOf(idx1, resolution_)].weight_;
float f1 = voxels_[IndexOf(idx1, resolution_)].tsdf_;
const Eigen::Vector3f &c1 =
voxels_[IndexOf(idx1, resolution_)].color_;
if (w1 != 0.0f && f1 < 0.98f && f1 >= -0.98f && f0 * f1 < 0) {
float r0 = ::fabs(f0);
float r1 = ::fabs(f1);
Eigen::Vector3f p = p0;
p(i) = (p0(i) * r1 + p1(i) * r0) / (r0 + r1);
point = p + origin_;
if (color_type_ == TSDFVolumeColorType::RGB8) {
color = (c0 * r1 + c1 * r0) / (r0 + r1) / 255.0f;
} else if (color_type_ == TSDFVolumeColorType::Gray32) {
color = (c0 * r1 + c1 * r0) / (r0 + r1);
}
// has_normal
normal = GetNormalAt(p, voxels_, voxel_length_, resolution_);
}
}
return thrust::make_tuple(point, normal, color);
}
};
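// A cell takes part in marching cubes only if all eight of its corners carry
// a non-zero integration weight; cells touching the upper grid boundary are
// rejected outright.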
struct count_valid_voxels_functor {
count_valid_voxels_functor(const geometry::TSDFVoxel *voxels,
int resolution)
: voxels_(voxels), resolution_(resolution){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
__device__ bool operator()(const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) const {
size_t idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
if (x == resolution_ - 1 || y == resolution_ - 1 || z == resolution_ - 1) return false;
geometry::TSDFVoxel v = thrust::get<1>(kv);
#pragma unroll
for (int i = 0; i < 8; ++i) {
Eigen::Vector3i idx =
Eigen::Vector3i(x + shift[i][0], y + shift[i][1], z + shift[i][2]);
if (voxels_[IndexOf(idx, resolution_)].weight_ == 0.0f)
return false;
}
return true;
}
};
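// Phase 0: for every (resolution - 1)^3 cell, build the marching-cubes cube
// index from the signs of the eight corner TSDF values; cells with an
// unobserved corner (zero weight) are tagged with -1 and discarded later.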
struct extract_mesh_phase0_functor {
extract_mesh_phase0_functor(const geometry::TSDFVoxel *voxels,
int resolution)
: voxels_(voxels), resolution_(resolution){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
__device__ thrust::tuple<Eigen::Vector3i, int> operator()(size_t idx) {
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_ - 1);
int cube_index = 0;
Eigen::Vector3i key = Eigen::Vector3i(x, y, z);
for (int i = 0; i < 8; ++i) {
Eigen::Vector3i idxs =
key +
Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]);
if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) {
return thrust::make_tuple(key, -1);
} else {
float f = voxels_[IndexOf(idxs, resolution_)].tsdf_;
if (f < 0.0f) {
cube_index |= (1 << i);
}
}
}
return thrust::make_tuple(key, cube_index);
}
};
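// Phase 1: gather the TSDF value and color of each of the eight corners of
// every surviving cell into flat arrays holding eight entries per cell.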
struct extract_mesh_phase1_functor {
extract_mesh_phase1_functor(const geometry::TSDFVoxel *voxels,
const Eigen::Vector3i *keys,
int resolution,
TSDFVolumeColorType color_type)
: voxels_(voxels),
keys_(keys),
resolution_(resolution),
color_type_(color_type){};
const geometry::TSDFVoxel *voxels_;
const Eigen::Vector3i *keys_;
const int resolution_;
TSDFVolumeColorType color_type_;
__device__ thrust::tuple<float, Eigen::Vector3f> operator()(size_t idx) {
int j = idx / 8;
int i = idx % 8;
const Eigen::Vector3i &key = keys_[j];
Eigen::Vector3i idxs =
key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]);
Eigen::Vector3f c = Eigen::Vector3f::Zero();
if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) {
return thrust::make_tuple(0.0f, c);
} else {
float f = voxels_[IndexOf(idxs, resolution_)].tsdf_;
if (color_type_ == TSDFVolumeColorType::RGB8) {
c = voxels_[IndexOf(idxs, resolution_)].color_ / 255.0;
} else if (color_type_ == TSDFVolumeColorType::Gray32) {
c = voxels_[IndexOf(idxs, resolution_)].color_;
}
return thrust::make_tuple(f, c);
}
}
};
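// Phase 2: for each of the twelve cell edges flagged in edge_table, place a
// mesh vertex on that edge by linearly interpolating the two corner TSDF
// values (and colors); inactive edges return a key of -1 so they can be
// compacted away by the caller.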
struct extract_mesh_phase2_functor {
extract_mesh_phase2_functor(const Eigen::Vector3i *keys,
const int *cube_indices,
const Eigen::Vector3f &origin,
int resolution,
float voxel_length,
const float *fs,
const Eigen::Vector3f *cs,
TSDFVolumeColorType color_type)
: keys_(keys),
cube_indices_(cube_indices),
origin_(origin),
resolution_(resolution),
voxel_length_(voxel_length),
half_voxel_length_(0.5 * voxel_length_),
fs_(fs),
cs_(cs),
color_type_(color_type){};
const Eigen::Vector3i *keys_;
const int *cube_indices_;
const Eigen::Vector3f origin_;
const int resolution_;
const float voxel_length_;
const float half_voxel_length_;
const float *fs_;
const Eigen::Vector3f *cs_;
const TSDFVolumeColorType color_type_;
__device__ thrust::
tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f>
operator()(size_t idx) const {
int j = idx / 12;
const Eigen::Vector3i &xyz = keys_[j];
int cube_index = cube_indices_[j];
int offset = j * 8;
int x = xyz[0];
int y = xyz[1];
int z = xyz[2];
int i = idx % 12;
if (edge_table[cube_index] & (1 << i)) {
Eigen::Vector4i edge_index =
Eigen::Vector4i(x, y, z, 0) +
Eigen::Vector4i(edge_shift[i][0], edge_shift[i][1],
edge_shift[i][2], edge_shift[i][3]);
Eigen::Vector3f pt(
half_voxel_length_ + voxel_length_ * edge_index(0),
half_voxel_length_ + voxel_length_ * edge_index(1),
half_voxel_length_ + voxel_length_ * edge_index(2));
float f0 = abs(fs_[offset + edge_to_vert[i][0]]);
float f1 = abs(fs_[offset + edge_to_vert[i][1]]);
pt(edge_index(3)) += f0 * voxel_length_ / (f0 + f1);
Eigen::Vector3f vertex = pt + origin_;
Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero();
if (color_type_ != TSDFVolumeColorType::NoColor) {
const auto &c0 = cs_[offset + edge_to_vert[i][0]];
const auto &c1 = cs_[offset + edge_to_vert[i][1]];
vertex_color = (f1 * c0 + f0 * c1) / (f0 + f1);
}
return thrust::make_tuple(xyz, cube_index, i, vertex, vertex_color);
} else {
Eigen::Vector3i index = -Eigen::Vector3i::Ones();
Eigen::Vector3f vertex = Eigen::Vector3f::Zero();
Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero();
return thrust::make_tuple(index, cube_index, i, vertex,
vertex_color);
}
}
};
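// Maps a vertex's position within a tri_table triplet to its slot in the
// output triangle; swapping the last two entries emits triangles with the
// opposite winding to the raw tri_table order.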
__constant__ int vert_table[3] = {0, 2, 1};
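// Phase 3: for every cell, walk its tri_table entry and resolve each edge
// number to the index of the vertex that phase 2 generated for that cell,
// writing the resulting triangles into the cell's slots; unused slots keep
// (-1, -1, -1) and are removed afterwards.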
struct extract_mesh_phase3_functor {
extract_mesh_phase3_functor(const int *cube_index,
const int *vert_no,
const int *key_index,
Eigen::Vector3i *triangles)
: cube_index_(cube_index),
vert_no_(vert_no),
key_index_(key_index),
triangles_(triangles){};
const int *cube_index_;
const int *vert_no_;
const int *key_index_;
Eigen::Vector3i *triangles_;
__device__ void operator()(size_t idx) {
const int kindx0 = key_index_[idx];
const int kindx1 = key_index_[idx + 1];
for (int j = kindx0; j < kindx1; ++j) {
const int cindx = cube_index_[j];
for (int i = 0; tri_table[cindx][i] != -1; ++i) {
const int tri_idx = tri_table[cindx][i];
for (int l = kindx0; l < kindx1; ++l) {
if (vert_no_[l] == tri_idx) {
triangles_[idx * 4 + i / 3][vert_table[i % 3]] = l;
}
}
}
}
}
};
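// Turn every observed near-surface voxel (weight != 0 and |tsdf| < 0.98) into
// a point at the voxel center, gray-colored by mapping its TSDF from [-1, 1]
// to [0, 1]; all other voxels yield NaN and are filtered out by the caller.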
struct extract_voxel_pointcloud_functor {
extract_voxel_pointcloud_functor(const Eigen::Vector3f &origin,
int resolution,
float voxel_length)
: origin_(origin),
resolution_(resolution),
voxel_length_(voxel_length),
half_voxel_length_(0.5 * voxel_length){};
const Eigen::Vector3f origin_;
const int resolution_;
const float voxel_length_;
const float half_voxel_length_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()(
const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) {
int idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
geometry::TSDFVoxel v = thrust::get<1>(kv);
Eigen::Vector3f pt(half_voxel_length_ + voxel_length_ * x,
half_voxel_length_ + voxel_length_ * y,
half_voxel_length_ + voxel_length_ * z);
if (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f) {
float c = (v.tsdf_ + 1.0) * 0.5;
return thrust::make_tuple(pt + origin_, Eigen::Vector3f(c, c, c));
}
return thrust::make_tuple(
Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN()),
Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN()));
}
};
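// Same selection as above, but emit (grid index, Voxel) pairs for a
// geometry::VoxelGrid; rejected voxels are tagged with INVALID_VOXEL_INDEX.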
struct extract_voxel_grid_functor {
extract_voxel_grid_functor(int resolution) : resolution_(resolution){};
const int resolution_;
__device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()(
const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) {
int idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
Eigen::Vector3i grid_idx = Eigen::Vector3i(x, y, z);
geometry::TSDFVoxel v = thrust::get<1>(kv);
const float w = v.weight_;
const float f = v.tsdf_;
if (w != 0.0f && f < 0.98f && f >= -0.98f) {
float c = (f + 1.0) * 0.5;
return thrust::make_tuple(
grid_idx,
geometry::Voxel(grid_idx, Eigen::Vector3f(c, c, c)));
}
return thrust::make_tuple(
Eigen::Vector3i::Constant(geometry::INVALID_VOXEL_INDEX),
geometry::Voxel());
}
};
} // namespace
UniformTSDFVolume::UniformTSDFVolume(
float length,
int resolution,
float sdf_trunc,
TSDFVolumeColorType color_type,
const Eigen::Vector3f &origin /* = Eigen::Vector3f::Zero()*/)
: TSDFVolume(length / (float)resolution, sdf_trunc, color_type),
origin_(origin),
length_(length),
resolution_(resolution),
voxel_num_(resolution * resolution * resolution) {
voxels_.resize(voxel_num_);
}
UniformTSDFVolume::~UniformTSDFVolume() {}
UniformTSDFVolume::UniformTSDFVolume(const UniformTSDFVolume &other)
: TSDFVolume(other),
voxels_(other.voxels_),
origin_(other.origin_),
length_(other.length_),
resolution_(other.resolution_),
voxel_num_(other.voxel_num_) {}
void UniformTSDFVolume::Reset() { voxels_.clear(); }
void UniformTSDFVolume::Integrate(
const geometry::RGBDImage &image,
const camera::PinholeCameraIntrinsic &intrinsic,
const Eigen::Matrix4f &extrinsic) {
// This function goes through the voxels and scan-converts the relative
// depth/color values into each voxel.
// The following implementation is a highly optimized version.
if ((image.depth_.num_of_channels_ != 1) ||
(image.depth_.bytes_per_channel_ != 4) ||
(image.depth_.width_ != intrinsic.width_) ||
(image.depth_.height_ != intrinsic.height_) ||
(color_type_ == TSDFVolumeColorType::RGB8 &&
image.color_.num_of_channels_ != 3) ||
(color_type_ == TSDFVolumeColorType::RGB8 &&
image.color_.bytes_per_channel_ != 1) ||
(color_type_ == TSDFVolumeColorType::Gray32 &&
image.color_.num_of_channels_ != 1) ||
(color_type_ == TSDFVolumeColorType::Gray32 &&
image.color_.bytes_per_channel_ != 4) ||
(color_type_ != TSDFVolumeColorType::NoColor &&
image.color_.width_ != intrinsic.width_) ||
(color_type_ != TSDFVolumeColorType::NoColor &&
image.color_.height_ != intrinsic.height_)) {
utility::LogError(
"[UniformTSDFVolume::Integrate] Unsupported image format.");
}
auto depth2cameradistance =
geometry::Image::CreateDepthToCameraDistanceMultiplierFloatImage(
intrinsic);
IntegrateWithDepthToCameraDistanceMultiplier(image, intrinsic, extrinsic,
*depth2cameradistance);
}
std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractPointCloud() {
auto pointcloud = std::make_shared<geometry::PointCloud>();
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
extract_pointcloud_functor func(thrust::raw_pointer_cast(voxels_.data()),
resolution_, voxel_length_, origin_,
color_type_);
pointcloud->points_.resize(n_valid_voxels);
pointcloud->normals_.resize(n_valid_voxels);
pointcloud->colors_.resize(n_valid_voxels);
size_t n_total =
(resolution_ - 2) * (resolution_ - 2) * (resolution_ - 2) * 3;
auto begin = make_tuple_begin(pointcloud->points_, pointcloud->normals_,
pointcloud->colors_);
auto end_p = thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func),
thrust::make_transform_iterator(
thrust::make_counting_iterator(n_total), func),
begin,
[] __device__(const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f,
Eigen::Vector3f> &x) {
const Eigen::Vector3f &pt = thrust::get<0>(x);
return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2)));
});
resize_all(thrust::distance(begin, end_p), pointcloud->points_,
pointcloud->normals_, pointcloud->colors_);
if (color_type_ == TSDFVolumeColorType::NoColor)
pointcloud->colors_.clear();
return pointcloud;
}
std::shared_ptr<geometry::TriangleMesh>
UniformTSDFVolume::ExtractTriangleMesh() {
// implementation of marching cubes, based on
// http://paulbourke.net/geometry/polygonise/
auto mesh = std::make_shared<geometry::TriangleMesh>();
size_t n_valid_voxels = thrust::count_if(
enumerate_begin(voxels_),
enumerate_end(voxels_),
count_valid_voxels_functor(thrust::raw_pointer_cast(voxels_.data()),
resolution_));
size_t res3 = (resolution_ - 1) * (resolution_ - 1) * (resolution_ - 1);
// compute the marching-cubes cube index for each voxel
utility::device_vector<Eigen::Vector3i> keys(n_valid_voxels);
utility::device_vector<int> cube_indices(n_valid_voxels);
extract_mesh_phase0_functor func0(thrust::raw_pointer_cast(voxels_.data()),
resolution_);
thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func0),
thrust::make_transform_iterator(
thrust::make_counting_iterator(res3), func0),
make_tuple_begin(keys, cube_indices),
[] __device__(const thrust::tuple<Eigen::Vector3i, int> &x) {
return thrust::get<1>(x) >= 0;
});
auto check_fn =
[] __device__(
const thrust::tuple<Eigen::Vector3i, int> &x) -> bool {
int cidx = thrust::get<1>(x);
return (cidx <= 0 || cidx >= 255);
};
size_t n_result1 = remove_if_vectors(check_fn, keys, cube_indices);
utility::device_vector<float> fs(n_result1 * 8);
utility::device_vector<Eigen::Vector3f> cs(n_result1 * 8);
extract_mesh_phase1_functor func1(thrust::raw_pointer_cast(voxels_.data()),
thrust::raw_pointer_cast(keys.data()),
resolution_, color_type_);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result1 * 8),
make_tuple_begin(fs, cs), func1);
// compute vertices and vertex_colors
int *ci_p = thrust::raw_pointer_cast(cube_indices.data());
size_t n_valid_cubes =
thrust::count_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result1 * 12),
[ci_p] __device__(size_t idx) {
int i = idx / 12;
int j = idx % 12;
return (edge_table[ci_p[i]] & (1 << j)) > 0;
});
resize_all(n_valid_cubes, mesh->vertices_, mesh->vertex_colors_);
utility::device_vector<Eigen::Vector3i> repeat_keys(n_valid_cubes);
utility::device_vector<int> repeat_cube_indices(n_valid_cubes);
utility::device_vector<int> vert_no(n_valid_cubes);
extract_mesh_phase2_functor func2(
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(cube_indices.data()), origin_,
            resolution_, voxel_length_, thrust::raw_pointer_cast(fs.data()),
thrust::raw_pointer_cast(cs.data()), color_type_);
thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func2),
thrust::make_transform_iterator(
thrust::make_counting_iterator(n_result1 * 12), func2),
make_tuple_begin(repeat_keys, repeat_cube_indices, vert_no,
mesh->vertices_, mesh->vertex_colors_),
[] __device__(
const thrust::tuple<Eigen::Vector3i, int, int,
Eigen::Vector3f, Eigen::Vector3f> &x) {
return thrust::get<0>(x)[0] >= 0;
});
// compute triangles
utility::device_vector<int> vt_offsets(n_valid_cubes + 1, 0);
auto end2 = thrust::reduce_by_key(repeat_keys.begin(), repeat_keys.end(),
thrust::make_constant_iterator<int>(1),
thrust::make_discard_iterator(),
vt_offsets.begin());
size_t n_result2 = thrust::distance(vt_offsets.begin(), end2.second);
vt_offsets.resize(n_result2 + 1);
thrust::exclusive_scan(vt_offsets.begin(), vt_offsets.end(),
vt_offsets.begin());
mesh->triangles_.resize(n_result2 * 4, Eigen::Vector3i(-1, -1, -1));
extract_mesh_phase3_functor func3(
thrust::raw_pointer_cast(repeat_cube_indices.data()),
thrust::raw_pointer_cast(vert_no.data()),
thrust::raw_pointer_cast(vt_offsets.data()),
thrust::raw_pointer_cast(mesh->triangles_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result2), func3);
auto end3 = thrust::remove_if(
mesh->triangles_.begin(), mesh->triangles_.end(),
[] __device__(const Eigen::Vector3i &idxs) { return idxs[0] < 0; });
mesh->triangles_.resize(thrust::distance(mesh->triangles_.begin(), end3));
return mesh;
}
std::shared_ptr<geometry::PointCloud>
UniformTSDFVolume::ExtractVoxelPointCloud() const {
auto voxel = std::make_shared<geometry::PointCloud>();
// const float *p_tsdf = (const float *)tsdf_.data();
// const float *p_weight = (const float *)weight_.data();
// const float *p_color = (const float *)color_.data();
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
extract_voxel_pointcloud_functor func(origin_, resolution_, voxel_length_);
resize_all(n_valid_voxels, voxel->points_, voxel->colors_);
thrust::copy_if(
thrust::make_transform_iterator(enumerate_begin(voxels_), func),
thrust::make_transform_iterator(enumerate_end(voxels_), func),
make_tuple_begin(voxel->points_, voxel->colors_),
[] __device__(
const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &x) {
const Eigen::Vector3f &pt = thrust::get<0>(x);
return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2)));
});
voxel->RemoveNoneFinitePoints(true, false);
return voxel;
}
std::shared_ptr<geometry::VoxelGrid> UniformTSDFVolume::ExtractVoxelGrid()
const {
auto voxel_grid = std::make_shared<geometry::VoxelGrid>();
voxel_grid->voxel_size_ = voxel_length_;
voxel_grid->origin_ = origin_;
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
resize_all(n_valid_voxels, voxel_grid->voxels_keys_,
voxel_grid->voxels_values_);
extract_voxel_grid_functor func(resolution_);
thrust::copy_if(
thrust::make_transform_iterator(enumerate_begin(voxels_), func),
thrust::make_transform_iterator(enumerate_end(voxels_), func),
make_tuple_begin(voxel_grid->voxels_keys_,
voxel_grid->voxels_values_),
[] __device__(
const thrust::tuple<Eigen::Vector3i, geometry::Voxel> &x) {
return thrust::get<0>(x) !=
Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX);
});
return voxel_grid;
}
void UniformTSDFVolume::IntegrateWithDepthToCameraDistanceMultiplier(
const geometry::RGBDImage &image,
const camera::PinholeCameraIntrinsic &intrinsic,
const Eigen::Matrix4f &extrinsic,
const geometry::Image &depth_to_camera_distance_multiplier) {
const float fx = intrinsic.GetFocalLength().first;
const float fy = intrinsic.GetFocalLength().second;
const float cx = intrinsic.GetPrincipalPoint().first;
const float cy = intrinsic.GetPrincipalPoint().second;
const float safe_width = intrinsic.width_ - 0.0001f;
const float safe_height = intrinsic.height_ - 0.0001f;
voxels_.resize(voxel_num_);
uniform_integrate_functor func(
fx, fy, cx, cy, extrinsic, voxel_length_, sdf_trunc_,
safe_width, safe_height, resolution_,
thrust::raw_pointer_cast(image.color_.data_.data()),
thrust::raw_pointer_cast(image.depth_.data_.data()),
thrust::raw_pointer_cast(
depth_to_camera_distance_multiplier.data_.data()),
image.depth_.width_, image.color_.num_of_channels_, color_type_,
origin_, thrust::raw_pointer_cast(voxels_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(
resolution_ * resolution_ * resolution_),
func);
} | 2c1832db2613ee2d01d95a9418ab423ed36b5c50.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/integration/marching_cubes_const.h"
#include "cupoch/integration/uniform_tsdfvolume.h"
#include "cupoch/integration/integrate_functor.h"
#include "cupoch/utility/helper.h"
using namespace cupoch;
using namespace cupoch::integration;
namespace {
__device__ float GetTSDFAt(const Eigen::Vector3f &p,
const geometry::TSDFVoxel *voxels,
float voxel_length,
int resolution) {
Eigen::Vector3i idx;
Eigen::Vector3f p_grid = p / voxel_length - Eigen::Vector3f(0.5, 0.5, 0.5);
for (int i = 0; i < 3; i++) {
idx(i) = (int)std::floor(p_grid(i));
}
Eigen::Vector3f r = p_grid - idx.cast<float>();
float tsdf = 0;
tsdf += (1 - r(0)) * (1 - r(1)) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 0), resolution)].tsdf_;
tsdf += (1 - r(0)) * (1 - r(1)) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 1), resolution)].tsdf_;
tsdf += (1 - r(0)) * r(1) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 0), resolution)].tsdf_;
tsdf += (1 - r(0)) * r(1) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 1), resolution)].tsdf_;
tsdf += r(0) * (1 - r(1)) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 0), resolution)].tsdf_;
tsdf += r(0) * (1 - r(1)) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 1), resolution)].tsdf_;
tsdf += r(0) * r(1) * (1 - r(2)) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 0), resolution)].tsdf_;
tsdf += r(0) * r(1) * r(2) *
voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 1), resolution)].tsdf_;
return tsdf;
}
__device__ Eigen::Vector3f GetNormalAt(const Eigen::Vector3f &p,
const geometry::TSDFVoxel *voxels,
float voxel_length,
int resolution) {
Eigen::Vector3f n;
const double half_gap = 0.99 * voxel_length;
#pragma unroll
for (int i = 0; i < 3; i++) {
Eigen::Vector3f p0 = p;
p0(i) -= half_gap;
Eigen::Vector3f p1 = p;
p1(i) += half_gap;
n(i) = GetTSDFAt(p1, voxels, voxel_length, resolution) -
GetTSDFAt(p0, voxels, voxel_length, resolution);
}
return n.normalized();
}
struct extract_pointcloud_functor {
extract_pointcloud_functor(const geometry::TSDFVoxel *voxels,
int resolution,
float voxel_length,
const Eigen::Vector3f &origin,
TSDFVolumeColorType color_type)
: voxels_(voxels),
resolution_(resolution),
voxel_length_(voxel_length),
origin_(origin),
half_voxel_length_(0.5 * voxel_length_),
color_type_(color_type){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
const float voxel_length_;
const Eigen::Vector3f origin_;
const float half_voxel_length_;
const TSDFVolumeColorType color_type_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f>
operator()(const size_t idx) {
int res2 = (resolution_ - 2) * (resolution_ - 2);
int x = idx / (3 * res2) + 1;
int yzi = idx % (3 * res2);
int y = yzi / (3 * (resolution_ - 2)) + 1;
int zi = yzi % (3 * (resolution_ - 2));
int z = zi / 3 + 1;
int i = zi % 3;
Eigen::Vector3f point = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3f normal = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3f color = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN());
Eigen::Vector3i idx0(x, y, z);
float w0 = voxels_[IndexOf(idx0, resolution_)].weight_;
float f0 = voxels_[IndexOf(idx0, resolution_)].tsdf_;
const Eigen::Vector3f &c0 = voxels_[IndexOf(idx0, resolution_)].color_;
if (!(w0 != 0.0f && f0 < 0.98f && f0 >= -0.98f)) {
return thrust::make_tuple(point, normal, color);
}
Eigen::Vector3f p0(half_voxel_length_ + voxel_length_ * x,
half_voxel_length_ + voxel_length_ * y,
half_voxel_length_ + voxel_length_ * z);
Eigen::Vector3f p1 = p0;
p1(i) += voxel_length_;
Eigen::Vector3i idx1 = idx0;
idx1(i) += 1;
if (idx1(i) < resolution_ - 1) {
float w1 = voxels_[IndexOf(idx1, resolution_)].weight_;
float f1 = voxels_[IndexOf(idx1, resolution_)].tsdf_;
const Eigen::Vector3f &c1 =
voxels_[IndexOf(idx1, resolution_)].color_;
if (w1 != 0.0f && f1 < 0.98f && f1 >= -0.98f && f0 * f1 < 0) {
float r0 = std::fabs(f0);
float r1 = std::fabs(f1);
Eigen::Vector3f p = p0;
p(i) = (p0(i) * r1 + p1(i) * r0) / (r0 + r1);
point = p + origin_;
if (color_type_ == TSDFVolumeColorType::RGB8) {
color = (c0 * r1 + c1 * r0) / (r0 + r1) / 255.0f;
} else if (color_type_ == TSDFVolumeColorType::Gray32) {
color = (c0 * r1 + c1 * r0) / (r0 + r1);
}
// has_normal
normal = GetNormalAt(p, voxels_, voxel_length_, resolution_);
}
}
return thrust::make_tuple(point, normal, color);
}
};
struct count_valid_voxels_functor {
count_valid_voxels_functor(const geometry::TSDFVoxel *voxels,
int resolution)
: voxels_(voxels), resolution_(resolution){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
__device__ bool operator()(const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) const {
size_t idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
if (x == resolution_ - 1 || y == resolution_ - 1 || z == resolution_ - 1) return false;
geometry::TSDFVoxel v = thrust::get<1>(kv);
#pragma unroll
for (int i = 0; i < 8; ++i) {
Eigen::Vector3i idx =
Eigen::Vector3i(x + shift[i][0], y + shift[i][1], z + shift[i][2]);
if (voxels_[IndexOf(idx, resolution_)].weight_ == 0.0f)
return false;
}
return true;
}
};
struct extract_mesh_phase0_functor {
extract_mesh_phase0_functor(const geometry::TSDFVoxel *voxels,
int resolution)
: voxels_(voxels), resolution_(resolution){};
const geometry::TSDFVoxel *voxels_;
const int resolution_;
__device__ thrust::tuple<Eigen::Vector3i, int> operator()(size_t idx) {
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_ - 1);
int cube_index = 0;
Eigen::Vector3i key = Eigen::Vector3i(x, y, z);
for (int i = 0; i < 8; ++i) {
Eigen::Vector3i idxs =
key +
Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]);
if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) {
return thrust::make_tuple(key, -1);
} else {
float f = voxels_[IndexOf(idxs, resolution_)].tsdf_;
if (f < 0.0f) {
cube_index |= (1 << i);
}
}
}
return thrust::make_tuple(key, cube_index);
}
};
struct extract_mesh_phase1_functor {
extract_mesh_phase1_functor(const geometry::TSDFVoxel *voxels,
const Eigen::Vector3i *keys,
int resolution,
TSDFVolumeColorType color_type)
: voxels_(voxels),
keys_(keys),
resolution_(resolution),
color_type_(color_type){};
const geometry::TSDFVoxel *voxels_;
const Eigen::Vector3i *keys_;
const int resolution_;
TSDFVolumeColorType color_type_;
__device__ thrust::tuple<float, Eigen::Vector3f> operator()(size_t idx) {
int j = idx / 8;
int i = idx % 8;
const Eigen::Vector3i &key = keys_[j];
Eigen::Vector3i idxs =
key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]);
Eigen::Vector3f c = Eigen::Vector3f::Zero();
if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) {
return thrust::make_tuple(0.0f, c);
} else {
float f = voxels_[IndexOf(idxs, resolution_)].tsdf_;
if (color_type_ == TSDFVolumeColorType::RGB8) {
c = voxels_[IndexOf(idxs, resolution_)].color_ / 255.0;
} else if (color_type_ == TSDFVolumeColorType::Gray32) {
c = voxels_[IndexOf(idxs, resolution_)].color_;
}
return thrust::make_tuple(f, c);
}
}
};
struct extract_mesh_phase2_functor {
extract_mesh_phase2_functor(const Eigen::Vector3i *keys,
const int *cube_indices,
const Eigen::Vector3f &origin,
int resolution,
float voxel_length,
const float *fs,
const Eigen::Vector3f *cs,
TSDFVolumeColorType color_type)
: keys_(keys),
cube_indices_(cube_indices),
origin_(origin),
resolution_(resolution),
voxel_length_(voxel_length),
half_voxel_length_(0.5 * voxel_length_),
fs_(fs),
cs_(cs),
color_type_(color_type){};
const Eigen::Vector3i *keys_;
const int *cube_indices_;
const Eigen::Vector3f origin_;
const int resolution_;
const float voxel_length_;
const float half_voxel_length_;
const float *fs_;
const Eigen::Vector3f *cs_;
const TSDFVolumeColorType color_type_;
__device__ thrust::
tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f>
operator()(size_t idx) const {
int j = idx / 12;
const Eigen::Vector3i &xyz = keys_[j];
int cube_index = cube_indices_[j];
int offset = j * 8;
int x = xyz[0];
int y = xyz[1];
int z = xyz[2];
int i = idx % 12;
if (edge_table[cube_index] & (1 << i)) {
Eigen::Vector4i edge_index =
Eigen::Vector4i(x, y, z, 0) +
Eigen::Vector4i(edge_shift[i][0], edge_shift[i][1],
edge_shift[i][2], edge_shift[i][3]);
Eigen::Vector3f pt(
half_voxel_length_ + voxel_length_ * edge_index(0),
half_voxel_length_ + voxel_length_ * edge_index(1),
half_voxel_length_ + voxel_length_ * edge_index(2));
float f0 = abs(fs_[offset + edge_to_vert[i][0]]);
float f1 = abs(fs_[offset + edge_to_vert[i][1]]);
pt(edge_index(3)) += f0 * voxel_length_ / (f0 + f1);
Eigen::Vector3f vertex = pt + origin_;
Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero();
if (color_type_ != TSDFVolumeColorType::NoColor) {
const auto &c0 = cs_[offset + edge_to_vert[i][0]];
const auto &c1 = cs_[offset + edge_to_vert[i][1]];
vertex_color = (f1 * c0 + f0 * c1) / (f0 + f1);
}
return thrust::make_tuple(xyz, cube_index, i, vertex, vertex_color);
} else {
Eigen::Vector3i index = -Eigen::Vector3i::Ones();
Eigen::Vector3f vertex = Eigen::Vector3f::Zero();
Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero();
return thrust::make_tuple(index, cube_index, i, vertex,
vertex_color);
}
}
};
__constant__ int vert_table[3] = {0, 2, 1};
struct extract_mesh_phase3_functor {
extract_mesh_phase3_functor(const int *cube_index,
const int *vert_no,
const int *key_index,
Eigen::Vector3i *triangles)
: cube_index_(cube_index),
vert_no_(vert_no),
key_index_(key_index),
triangles_(triangles){};
const int *cube_index_;
const int *vert_no_;
const int *key_index_;
Eigen::Vector3i *triangles_;
__device__ void operator()(size_t idx) {
const int kindx0 = key_index_[idx];
const int kindx1 = key_index_[idx + 1];
for (int j = kindx0; j < kindx1; ++j) {
const int cindx = cube_index_[j];
for (int i = 0; tri_table[cindx][i] != -1; ++i) {
const int tri_idx = tri_table[cindx][i];
for (int l = kindx0; l < kindx1; ++l) {
if (vert_no_[l] == tri_idx) {
triangles_[idx * 4 + i / 3][vert_table[i % 3]] = l;
}
}
}
}
}
};
struct extract_voxel_pointcloud_functor {
extract_voxel_pointcloud_functor(const Eigen::Vector3f &origin,
int resolution,
float voxel_length)
: origin_(origin),
resolution_(resolution),
voxel_length_(voxel_length),
half_voxel_length_(0.5 * voxel_length){};
const Eigen::Vector3f origin_;
const int resolution_;
const float voxel_length_;
const float half_voxel_length_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()(
const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) {
int idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
geometry::TSDFVoxel v = thrust::get<1>(kv);
Eigen::Vector3f pt(half_voxel_length_ + voxel_length_ * x,
half_voxel_length_ + voxel_length_ * y,
half_voxel_length_ + voxel_length_ * z);
if (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f) {
float c = (v.tsdf_ + 1.0) * 0.5;
return thrust::make_tuple(pt + origin_, Eigen::Vector3f(c, c, c));
}
return thrust::make_tuple(
Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN()),
Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN()));
}
};
struct extract_voxel_grid_functor {
extract_voxel_grid_functor(int resolution) : resolution_(resolution){};
const int resolution_;
__device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()(
const thrust::tuple<size_t, geometry::TSDFVoxel> &kv) {
int idx = thrust::get<0>(kv);
int x, y, z;
thrust::tie(x, y, z) = KeyOf(idx, resolution_);
Eigen::Vector3i grid_idx = Eigen::Vector3i(x, y, z);
geometry::TSDFVoxel v = thrust::get<1>(kv);
const float w = v.weight_;
const float f = v.tsdf_;
if (w != 0.0f && f < 0.98f && f >= -0.98f) {
float c = (f + 1.0) * 0.5;
return thrust::make_tuple(
grid_idx,
geometry::Voxel(grid_idx, Eigen::Vector3f(c, c, c)));
}
return thrust::make_tuple(
Eigen::Vector3i::Constant(geometry::INVALID_VOXEL_INDEX),
geometry::Voxel());
}
};
} // namespace
UniformTSDFVolume::UniformTSDFVolume(
float length,
int resolution,
float sdf_trunc,
TSDFVolumeColorType color_type,
const Eigen::Vector3f &origin /* = Eigen::Vector3f::Zero()*/)
: TSDFVolume(length / (float)resolution, sdf_trunc, color_type),
origin_(origin),
length_(length),
resolution_(resolution),
voxel_num_(resolution * resolution * resolution) {
voxels_.resize(voxel_num_);
}
UniformTSDFVolume::~UniformTSDFVolume() {}
UniformTSDFVolume::UniformTSDFVolume(const UniformTSDFVolume &other)
: TSDFVolume(other),
voxels_(other.voxels_),
origin_(other.origin_),
length_(other.length_),
resolution_(other.resolution_),
voxel_num_(other.voxel_num_) {}
void UniformTSDFVolume::Reset() { voxels_.clear(); }
void UniformTSDFVolume::Integrate(
const geometry::RGBDImage &image,
const camera::PinholeCameraIntrinsic &intrinsic,
const Eigen::Matrix4f &extrinsic) {
// This function goes through the voxels and scan-converts the relative
// depth/color values into each voxel.
// The following implementation is a highly optimized version.
if ((image.depth_.num_of_channels_ != 1) ||
(image.depth_.bytes_per_channel_ != 4) ||
(image.depth_.width_ != intrinsic.width_) ||
(image.depth_.height_ != intrinsic.height_) ||
(color_type_ == TSDFVolumeColorType::RGB8 &&
image.color_.num_of_channels_ != 3) ||
(color_type_ == TSDFVolumeColorType::RGB8 &&
image.color_.bytes_per_channel_ != 1) ||
(color_type_ == TSDFVolumeColorType::Gray32 &&
image.color_.num_of_channels_ != 1) ||
(color_type_ == TSDFVolumeColorType::Gray32 &&
image.color_.bytes_per_channel_ != 4) ||
(color_type_ != TSDFVolumeColorType::NoColor &&
image.color_.width_ != intrinsic.width_) ||
(color_type_ != TSDFVolumeColorType::NoColor &&
image.color_.height_ != intrinsic.height_)) {
utility::LogError(
"[UniformTSDFVolume::Integrate] Unsupported image format.");
}
auto depth2cameradistance =
geometry::Image::CreateDepthToCameraDistanceMultiplierFloatImage(
intrinsic);
IntegrateWithDepthToCameraDistanceMultiplier(image, intrinsic, extrinsic,
*depth2cameradistance);
}
std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractPointCloud() {
auto pointcloud = std::make_shared<geometry::PointCloud>();
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
extract_pointcloud_functor func(thrust::raw_pointer_cast(voxels_.data()),
resolution_, voxel_length_, origin_,
color_type_);
pointcloud->points_.resize(n_valid_voxels);
pointcloud->normals_.resize(n_valid_voxels);
pointcloud->colors_.resize(n_valid_voxels);
size_t n_total =
(resolution_ - 2) * (resolution_ - 2) * (resolution_ - 2) * 3;
auto begin = make_tuple_begin(pointcloud->points_, pointcloud->normals_,
pointcloud->colors_);
auto end_p = thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func),
thrust::make_transform_iterator(
thrust::make_counting_iterator(n_total), func),
begin,
[] __device__(const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f,
Eigen::Vector3f> &x) {
const Eigen::Vector3f &pt = thrust::get<0>(x);
return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2)));
});
resize_all(thrust::distance(begin, end_p), pointcloud->points_,
pointcloud->normals_, pointcloud->colors_);
if (color_type_ == TSDFVolumeColorType::NoColor)
pointcloud->colors_.clear();
return pointcloud;
}
std::shared_ptr<geometry::TriangleMesh>
UniformTSDFVolume::ExtractTriangleMesh() {
// implementation of marching cubes, based on
// http://paulbourke.net/geometry/polygonise/
auto mesh = std::make_shared<geometry::TriangleMesh>();
size_t n_valid_voxels = thrust::count_if(
enumerate_begin(voxels_),
enumerate_end(voxels_),
count_valid_voxels_functor(thrust::raw_pointer_cast(voxels_.data()),
resolution_));
size_t res3 = (resolution_ - 1) * (resolution_ - 1) * (resolution_ - 1);
// compute the marching-cubes cube index for each voxel
utility::device_vector<Eigen::Vector3i> keys(n_valid_voxels);
utility::device_vector<int> cube_indices(n_valid_voxels);
extract_mesh_phase0_functor func0(thrust::raw_pointer_cast(voxels_.data()),
resolution_);
thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func0),
thrust::make_transform_iterator(
thrust::make_counting_iterator(res3), func0),
make_tuple_begin(keys, cube_indices),
[] __device__(const thrust::tuple<Eigen::Vector3i, int> &x) {
return thrust::get<1>(x) >= 0;
});
auto check_fn =
[] __device__(
const thrust::tuple<Eigen::Vector3i, int> &x) -> bool {
int cidx = thrust::get<1>(x);
return (cidx <= 0 || cidx >= 255);
};
size_t n_result1 = remove_if_vectors(check_fn, keys, cube_indices);
utility::device_vector<float> fs(n_result1 * 8);
utility::device_vector<Eigen::Vector3f> cs(n_result1 * 8);
extract_mesh_phase1_functor func1(thrust::raw_pointer_cast(voxels_.data()),
thrust::raw_pointer_cast(keys.data()),
resolution_, color_type_);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result1 * 8),
make_tuple_begin(fs, cs), func1);
// compute vertices and vertex_colors
int *ci_p = thrust::raw_pointer_cast(cube_indices.data());
size_t n_valid_cubes =
thrust::count_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result1 * 12),
[ci_p] __device__(size_t idx) {
int i = idx / 12;
int j = idx % 12;
return (edge_table[ci_p[i]] & (1 << j)) > 0;
});
resize_all(n_valid_cubes, mesh->vertices_, mesh->vertex_colors_);
utility::device_vector<Eigen::Vector3i> repeat_keys(n_valid_cubes);
utility::device_vector<int> repeat_cube_indices(n_valid_cubes);
utility::device_vector<int> vert_no(n_valid_cubes);
extract_mesh_phase2_functor func2(
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(cube_indices.data()), origin_,
            resolution_, voxel_length_, thrust::raw_pointer_cast(fs.data()),
thrust::raw_pointer_cast(cs.data()), color_type_);
thrust::copy_if(
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_t>(0), func2),
thrust::make_transform_iterator(
thrust::make_counting_iterator(n_result1 * 12), func2),
make_tuple_begin(repeat_keys, repeat_cube_indices, vert_no,
mesh->vertices_, mesh->vertex_colors_),
[] __device__(
const thrust::tuple<Eigen::Vector3i, int, int,
Eigen::Vector3f, Eigen::Vector3f> &x) {
return thrust::get<0>(x)[0] >= 0;
});
// compute triangles
utility::device_vector<int> vt_offsets(n_valid_cubes + 1, 0);
auto end2 = thrust::reduce_by_key(repeat_keys.begin(), repeat_keys.end(),
thrust::make_constant_iterator<int>(1),
thrust::make_discard_iterator(),
vt_offsets.begin());
size_t n_result2 = thrust::distance(vt_offsets.begin(), end2.second);
vt_offsets.resize(n_result2 + 1);
thrust::exclusive_scan(vt_offsets.begin(), vt_offsets.end(),
vt_offsets.begin());
mesh->triangles_.resize(n_result2 * 4, Eigen::Vector3i(-1, -1, -1));
extract_mesh_phase3_functor func3(
thrust::raw_pointer_cast(repeat_cube_indices.data()),
thrust::raw_pointer_cast(vert_no.data()),
thrust::raw_pointer_cast(vt_offsets.data()),
thrust::raw_pointer_cast(mesh->triangles_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_result2), func3);
auto end3 = thrust::remove_if(
mesh->triangles_.begin(), mesh->triangles_.end(),
[] __device__(const Eigen::Vector3i &idxs) { return idxs[0] < 0; });
mesh->triangles_.resize(thrust::distance(mesh->triangles_.begin(), end3));
return mesh;
}
std::shared_ptr<geometry::PointCloud>
UniformTSDFVolume::ExtractVoxelPointCloud() const {
auto voxel = std::make_shared<geometry::PointCloud>();
// const float *p_tsdf = (const float *)tsdf_.data();
// const float *p_weight = (const float *)weight_.data();
// const float *p_color = (const float *)color_.data();
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
extract_voxel_pointcloud_functor func(origin_, resolution_, voxel_length_);
resize_all(n_valid_voxels, voxel->points_, voxel->colors_);
thrust::copy_if(
thrust::make_transform_iterator(enumerate_begin(voxels_), func),
thrust::make_transform_iterator(enumerate_end(voxels_), func),
make_tuple_begin(voxel->points_, voxel->colors_),
[] __device__(
const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &x) {
const Eigen::Vector3f &pt = thrust::get<0>(x);
return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2)));
});
voxel->RemoveNoneFinitePoints(true, false);
return voxel;
}
std::shared_ptr<geometry::VoxelGrid> UniformTSDFVolume::ExtractVoxelGrid()
const {
auto voxel_grid = std::make_shared<geometry::VoxelGrid>();
voxel_grid->voxel_size_ = voxel_length_;
voxel_grid->origin_ = origin_;
size_t n_valid_voxels =
thrust::count_if(voxels_.begin(), voxels_.end(),
[] __device__(const geometry::TSDFVoxel &v) {
return (v.weight_ != 0.0f && v.tsdf_ < 0.98f &&
v.tsdf_ >= -0.98f);
});
resize_all(n_valid_voxels, voxel_grid->voxels_keys_,
voxel_grid->voxels_values_);
extract_voxel_grid_functor func(resolution_);
thrust::copy_if(
thrust::make_transform_iterator(enumerate_begin(voxels_), func),
thrust::make_transform_iterator(enumerate_end(voxels_), func),
make_tuple_begin(voxel_grid->voxels_keys_,
voxel_grid->voxels_values_),
[] __device__(
const thrust::tuple<Eigen::Vector3i, geometry::Voxel> &x) {
return thrust::get<0>(x) !=
Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX);
});
return voxel_grid;
}
void UniformTSDFVolume::IntegrateWithDepthToCameraDistanceMultiplier(
const geometry::RGBDImage &image,
const camera::PinholeCameraIntrinsic &intrinsic,
const Eigen::Matrix4f &extrinsic,
const geometry::Image &depth_to_camera_distance_multiplier) {
const float fx = intrinsic.GetFocalLength().first;
const float fy = intrinsic.GetFocalLength().second;
const float cx = intrinsic.GetPrincipalPoint().first;
const float cy = intrinsic.GetPrincipalPoint().second;
const float safe_width = intrinsic.width_ - 0.0001f;
const float safe_height = intrinsic.height_ - 0.0001f;
voxels_.resize(voxel_num_);
uniform_integrate_functor func(
fx, fy, cx, cy, extrinsic, voxel_length_, sdf_trunc_,
safe_width, safe_height, resolution_,
thrust::raw_pointer_cast(image.color_.data_.data()),
thrust::raw_pointer_cast(image.depth_.data_.data()),
thrust::raw_pointer_cast(
depth_to_camera_distance_multiplier.data_.data()),
image.depth_.width_, image.color_.num_of_channels_, color_type_,
origin_, thrust::raw_pointer_cast(voxels_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(
resolution_ * resolution_ * resolution_),
func);
} |
9efb8a8a4efa09a22d39e7a8a5797c93b28e65a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <math.h>
#include <limits>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/op_registry.h"
#if defined(PADDLE_WITH_CUDA)
#include "paddle/fluid/platform/dynload/hipsparse.h"
#endif
namespace ops = paddle::operators;
namespace plf = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
__forceinline__ __device__ T CudaShuffleXorSync(unsigned mask, T val,
int width = warpSize) {
return __shfl_xor_sync(mask, val, width);
}
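// Warp-level butterfly reductions: each lane repeatedly exchanges its partial
// result with the lane whose id differs by the current offset (an XOR
// shuffle), so after log2(warp_size) steps every lane holds the full sum or
// maximum of its batch.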
template <typename T, int batch_size, int warp_size>
__device__ __forceinline__ void WarpReduceSum(T* sum) {
#pragma unroll
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch_size; ++i) {
T sum_val = CudaShuffleXorSync(0xFFFFFFFF, sum[i], offset);
sum[i] = sum[i] + sum_val;
}
}
}
template <typename T, int batch_size, int warp_size>
__device__ __forceinline__ void WarpReduceMax(T* sum) {
#pragma unroll
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch_size; ++i) {
T max_val = CudaShuffleXorSync(0xFFFFFFFF, sum[i], offset);
sum[i] = max(sum[i], max_val);
}
}
}
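// One warp handles one row of the block-sparse matrix (CSR layout given by
// layout_rowptr / layout_colindex): each lane loads a strided slice of the
// row's non-zeros into registers, applies the scale together with the
// key-padding and attention masks (masked positions become -inf), and then
// computes a numerically stable softmax by subtracting the row maximum before
// exponentiation, using the warp reductions above.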
template <typename T, int BlockSize, int BlockNnzMax>
__global__ void BlockSparseSoftmaxForward(T* softmax, const T* src, T scale,
const T* kp_mask, const T* attn_mask,
const int* layout_rowptr,
const int* layout_colindex,
int num_rows) {
// current thread related info
const int WarpSize = 32;
const int cur_row = blockIdx.x * blockDim.y + threadIdx.y;
if (cur_row < num_rows) {
const int cur_block_row = cur_row / BlockSize;
const int cur_block_nnz =
layout_rowptr[cur_block_row + 1] - layout_rowptr[cur_block_row];
T srcdata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize] = {0};
T attndata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize] = {0};
// read tensor data, attn mask
const int iter = (cur_block_nnz + WarpSize - 1) / WarpSize;
const T* srcptr = src + layout_rowptr[cur_block_row];
const T* attnptr = (attn_mask == nullptr)
? nullptr
: (attn_mask + cur_block_row * num_rows);
// the column start index in the current row
const int* colindex = layout_colindex + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
// read kp mask
T cur_kp_mask;
if ((kp_mask != nullptr) &&
std::abs(kp_mask[colindex[cur_block_col]]) <
std::numeric_limits<T>::epsilon()) {
cur_kp_mask = -std::numeric_limits<T>::infinity();
} else {
cur_kp_mask = 0;
}
// do mask operation
if ((attnptr != nullptr) &&
std::abs(attnptr[colindex[cur_block_col]]) <
std::numeric_limits<T>::epsilon()) {
srcdata[cur_reg_index] =
-std::numeric_limits<T>::infinity() * scale + cur_kp_mask;
} else {
srcdata[cur_reg_index] = scale * srcptr[cur_block_col] + cur_kp_mask;
}
} else {
srcdata[cur_reg_index] = -std::numeric_limits<T>::infinity();
}
}
// max value
T max_value = srcdata[0];
const int kIteration =
(cur_block_nnz * BlockSize + WarpSize - 1) / WarpSize;
#pragma unroll
for (int it = 1; it < kIteration; ++it) {
max_value = (max_value > srcdata[it]) ? max_value : srcdata[it];
}
WarpReduceMax<T, 1, WarpSize>(&max_value);
// exp sum
T sum = 0;
#pragma unroll
for (int it = 0; it < kIteration; ++it) {
srcdata[it] = ::exp(srcdata[it] - max_value);
sum += srcdata[it];
}
WarpReduceSum<T, 1, WarpSize>(&sum);
// compute softmax and write out
T* softmaxptr = softmax + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
softmaxptr[cur_block_col] = srcdata[cur_reg_index] / sum;
}
}
}
}
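// Backward pass of the row-wise softmax: with y = softmax(scale * x) and
// upstream gradient dy, each element receives
//   dx_i = scale * y_i * (dy_i - sum_j y_j * dy_j),
// computed with the same one-warp-per-row layout as the forward kernel.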
template <typename T, int BlockSize, int BlockNnzMax>
__global__ void BlockSparseSoftmaxBackward(T* dst, const T* grad, const T* src,
T scale, const int* layout_rowptr,
const int* layout_colindex,
int num_rows) {
// current thread related info
const int WarpSize = 32;
const int cur_row = blockIdx.x * blockDim.y + threadIdx.y;
if (cur_row < num_rows) {
const int cur_block_row = cur_row / BlockSize;
const int cur_block_nnz =
layout_rowptr[cur_block_row + 1] - layout_rowptr[cur_block_row];
T srcdata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize];
T graddata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize];
// read the softmax output and the upstream gradient for this row
const int iter = (cur_block_nnz + WarpSize - 1) / WarpSize;
const T* srcptr = src + layout_rowptr[cur_block_row];
const T* gradptr = grad + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
srcdata[cur_reg_index] = srcptr[cur_block_col];
graddata[cur_reg_index] = gradptr[cur_block_col];
} else {
srcdata[cur_reg_index] = 0;
graddata[cur_reg_index] = 0;
}
}
T sum = 0;
const int kIteration =
(cur_block_nnz * BlockSize + WarpSize - 1) / WarpSize;
#pragma unroll
for (int it = 0; it < kIteration; ++it) {
sum += srcdata[it] * graddata[it];
}
WarpReduceSum<T, 1, WarpSize>(&sum);
// compute softmax and write out
T* dstptr = dst + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
dstptr[cur_block_col] =
scale * srcdata[cur_reg_index] * (graddata[cur_reg_index] - sum);
}
}
}
}
using Tensor = framework::Tensor;
/*
input: sparse C in CSR format (num_rows,num_rows)
output: sparse C after softmax operation
*/
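// The launches below use a fixed 32x4 thread block (one warp per row, four
// rows per block); the BlockNnzMax template argument is picked from num_cols
// so that the kernel's fixed-size register buffers are large enough for the
// row being processed.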
template <typename DeviceContext, typename T>
void SparseSoftmaxForward(const platform::CUDADeviceContext& ctx,
const Tensor* offset, const Tensor* columns,
Tensor* input, Tensor* output, const int blocksize,
const int num_rows, const int num_cols,
const Tensor* key_padding_mask,
const Tensor* attn_mask) {
const int* offset_data = offset->data<int>();
const int* columns_data = columns->data<int>();
T* input_data = input->data<T>();
T* output_data = output->data<T>();
// Add mask
const T* key_padding_mask_data =
(key_padding_mask != nullptr) ? key_padding_mask->data<T>() : nullptr;
const T* attn_mask_data =
(attn_mask != nullptr) ? attn_mask->data<T>() : nullptr;
const int block_size = 1;
dim3 blocks(32, 4, 1);
int grid = (num_rows * block_size + 3) / 4;
T scaling = static_cast<T>(1.0) / sqrt(static_cast<T>(num_cols));
if (num_cols <= 4) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 4>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 4 && num_cols <= 8) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 8>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 8 && num_cols <= 16) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 16>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 16 && num_cols <= 32) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 32>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 32 && num_cols <= 64) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 64>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 64 && num_cols <= 128) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 128>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 128 && num_cols <= 256) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 256>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 256 && num_cols <= 512) {
hipLaunchKernelGGL(( BlockSparseSoftmaxForward<T, block_size, 512>), dim3(grid), dim3(blocks), 0, 0,
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The head_dim of query in sparse_attention op should less or equal "
"512"));
}
}
template <typename DeviceContext, typename T>
void SparseSoftmaxBackward(const platform::CUDADeviceContext& ctx,
const Tensor* offset, const Tensor* columns,
Tensor* dx, const Tensor* dout, const Tensor* out,
const int blocksize, const int num_rows,
const int num_cols) {
const int* offset_data = offset->data<int>();
const int* columns_data = columns->data<int>();
T* dx_data = dx->data<T>();
const T* dout_data = dout->data<T>();
const T* out_data = out->data<T>();
const int block_size = 1;
dim3 blocks(32, 4, 1);
int grid = (num_rows * block_size + 3) / 4;
T scaling = static_cast<T>(1.0) / sqrt(static_cast<T>(num_cols));
if (num_cols <= 4) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 4>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 4 && num_cols <= 8) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 8>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 8 && num_cols <= 16) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 16>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 16 && num_cols <= 32) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 32>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 32 && num_cols <= 64) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 64>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 64 && num_cols <= 128) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 128>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 128 && num_cols <= 256) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 256>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 256 && num_cols <= 512) {
hipLaunchKernelGGL(( BlockSparseSoftmaxBackward<T, block_size, 512>), dim3(grid), dim3(blocks), 0, 0,
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The head_dim of query in sparse_attention op should less or equal "
"512"));
}
}
using VarType = framework::proto::VarType;
inline hipDataType GetGpuType(const VarType::Type data_type) {
if (data_type == VarType::FP32) {
return HIP_R_32F;
} else if (data_type == VarType::FP64) {
return HIP_R_64F;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Not support tensor type in sparse_attention OP: %s",
framework::DataTypeToString(data_type)));
}
}
inline hipsparseOperation_t GetTransposeOperation(const bool transpose) {
if (transpose) {
return HIPSPARSE_OPERATION_TRANSPOSE;
} else {
return HIPSPARSE_OPERATION_NON_TRANSPOSE;
}
}
void CusparseDestroy(hipsparseDnMatDescr_t* dn_mat_first,
hipsparseDnMatDescr_t* dn_mat_second,
hipsparseSpMatDescr_t* sp_mat) {
platform::dynload::hipsparseDestroyDnMat(*dn_mat_first);
platform::dynload::hipsparseDestroyDnMat(*dn_mat_second);
platform::dynload::hipsparseDestroySpMat(*sp_mat);
}
/*
input: dense A (num_rows,num_cols), dense B (num_rows,num_cols)
output: sparse C in CSR format (num_rows,num_rows)
*/
template <typename DeviceContext, typename T>
void DotSdd(const platform::CUDADeviceContext& ctx, const Tensor* a,
const Tensor* b, const Tensor* c_offset, const Tensor* c_columns,
Tensor* c_value, const int num_rows, const int num_cols,
const bool a_transpose, const bool b_transpose) {
const T* a_data = a->data<T>();
const T* b_data = b->data<T>();
const int* c_offset_data = c_offset->data<int>();
const int* c_columns_data = c_columns->data<int>();
T* c_value_data = c_value->data<T>();
hipDataType gpu_type =
GetGpuType(framework::TransToProtoVarType(c_value->dtype()));
hipsparseHandle_t handle = nullptr;
hipsparseDnMatDescr_t mat_a, mat_b;
hipsparseSpMatDescr_t mat_c;
platform::dynload::hipsparseCreate(&handle);
// Create dense matrix A
platform::dynload::hipsparseCreateDnMat(&mat_a, num_rows, num_cols, num_cols,
const_cast<T*>(a_data), gpu_type,
HIPSPARSE_ORDER_ROW);
// Create dense matrix B
platform::dynload::hipsparseCreateDnMat(&mat_b, num_rows, num_cols, num_cols,
const_cast<T*>(b_data), gpu_type,
HIPSPARSE_ORDER_ROW);
// Create sparse matrix C in CSR format
int c_nnz = c_columns->dims()[1];
platform::dynload::hipsparseCreateCsr(
&mat_c, num_rows, num_rows, c_nnz, const_cast<int*>(c_offset_data),
const_cast<int*>(c_columns_data), c_value_data, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, gpu_type);
T alpha = 1;
T beta = 0;
size_t buffer_size = 0;
platform::dynload::hipsparseSDDMM_bufferSize(
handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha, mat_a, mat_b, &beta, mat_c,
gpu_type, HIPSPARSE_SDDMM_ALG_DEFAULT, &buffer_size);
auto d_buffer_ptr = paddle::memory::Alloc(ctx, buffer_size);
void* d_buffer = static_cast<void*>(d_buffer_ptr->ptr());
platform::dynload::hipsparseSDDMM(handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha,
mat_a, mat_b, &beta, mat_c, gpu_type,
HIPSPARSE_SDDMM_ALG_DEFAULT, d_buffer);
CusparseDestroy(&mat_a, &mat_b, &mat_c);
platform::dynload::hipsparseDestroy(handle);
}
/*
input: sparse A in CSR format (num_rows,num_rows), dense B (num_rows,num_cols)
output: dense C (num_rows,num_cols)
*/
template <typename DeviceContext, typename T>
void DotDsd(const platform::CUDADeviceContext& ctx, const Tensor* a_offset,
const Tensor* a_columns, const Tensor* a_value, const Tensor* b,
Tensor* c, const int num_rows, const int num_cols,
const bool a_transpose, const bool b_transpose) {
const int* a_offset_data = a_offset->data<int>();
const int* a_columns_data = a_columns->data<int>();
const T* a_value_data = a_value->data<T>();
const T* b_data = b->data<T>();
T* c_data = c->data<T>();
hipDataType gpu_type =
GetGpuType(framework::TransToProtoVarType(c->dtype()));
hipsparseHandle_t handle = nullptr;
hipsparseSpMatDescr_t mat_a;
hipsparseDnMatDescr_t mat_b, mat_c;
platform::dynload::hipsparseCreate(&handle);
// Create sparse matrix A in CSR format
int a_nnz = a_columns->dims()[1];
platform::dynload::hipsparseCreateCsr(
&mat_a, num_rows, num_rows, a_nnz, const_cast<int*>(a_offset_data),
const_cast<int*>(a_columns_data), const_cast<T*>(a_value_data),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO,
gpu_type);
// Create dense matrix B
platform::dynload::hipsparseCreateDnMat(&mat_b, num_rows, num_cols, num_cols,
const_cast<T*>(b_data), gpu_type,
HIPSPARSE_ORDER_ROW);
// Create dense matrix C
platform::dynload::hipsparseCreateDnMat(&mat_c, num_rows, num_cols, num_cols,
c_data, gpu_type, HIPSPARSE_ORDER_ROW);
T alpha = 1;
T beta = 0;
size_t buffer_size = 0;
// allocate an external buffer if needed
platform::dynload::hipsparseSpMM_bufferSize(
handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha, mat_a, mat_b, &beta, mat_c,
      gpu_type, HIPSPARSE_SPMM_ALG_DEFAULT, &buffer_size);
auto d_buffer_ptr = paddle::memory::Alloc(ctx, buffer_size);
void* d_buffer = static_cast<void*>(d_buffer_ptr->ptr());
platform::dynload::hipsparseSpMM(handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha,
mat_a, mat_b, &beta, mat_c, gpu_type,
                                   HIPSPARSE_SPMM_ALG_DEFAULT, d_buffer);
CusparseDestroy(&mat_b, &mat_c, &mat_a);
platform::dynload::hipsparseDestroy(handle);
}
std::vector<Tensor> GetSplitTensor(Tensor* input) {
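  // Collapse [batch_size, num_heads, ...] into [batch_size * num_heads, ...]
  // and split along dim 0 so each (batch, head) slice can be processed
  // independently.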
auto dims = input->dims();
int batch_size = dims[0];
int num_heads = dims[1];
std::vector<int> new_dims(dims.size() - 1);
new_dims[0] = batch_size * num_heads;
for (int i = 1; i < new_dims.size(); i++) {
new_dims[i] = dims[i + 1];
}
input->Resize(phi::make_ddim(new_dims));
return input->Split(1, 0);
}
template <typename DeviceContext, typename T>
class SparseAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto query = *ctx.Input<Tensor>("Q");
auto key = *ctx.Input<Tensor>("K");
auto value = *ctx.Input<Tensor>("V");
auto offset = *ctx.Input<Tensor>("Offset");
auto columns = *ctx.Input<Tensor>("Columns");
auto output_ptr = ctx.Output<Tensor>("Out");
output_ptr->mutable_data<T>(ctx.GetPlace());
auto sparse_dot_sdd_ptr = ctx.Output<Tensor>("SparseDotSdd");
sparse_dot_sdd_ptr->mutable_data<T>(ctx.GetPlace());
auto softmax_ptr = ctx.Output<Tensor>("Softmax");
softmax_ptr->mutable_data<T>(ctx.GetPlace());
// add Mask
auto* key_padding_mask = ctx.HasInput("KeyPaddingMask")
? ctx.Input<Tensor>("KeyPaddingMask")
: nullptr;
auto* attn_mask =
ctx.HasInput("AttnMask") ? ctx.Input<Tensor>("AttnMask") : nullptr;
auto output = *output_ptr;
auto result_sdd = *sparse_dot_sdd_ptr;
auto result_softmax = *softmax_ptr;
auto query_dims = query.dims();
int batch_size = query_dims[0];
int num_heads = query_dims[1];
int M = query_dims[2];
int N = query_dims[3];
std::vector<Tensor> query_lists = GetSplitTensor(&query);
std::vector<Tensor> key_lists = GetSplitTensor(&key);
std::vector<Tensor> value_lists = GetSplitTensor(&value);
std::vector<Tensor> offset_lists = GetSplitTensor(&offset);
std::vector<Tensor> columns_lists = GetSplitTensor(&columns);
std::vector<Tensor> result_sdd_lists = GetSplitTensor(&result_sdd);
std::vector<Tensor> result_softmax_lists = GetSplitTensor(&result_softmax);
std::vector<Tensor> output_lists = GetSplitTensor(&output);
const auto& dev_ctx = ctx.cuda_device_context();
const int iter_num = batch_size * num_heads;
for (int i = 0; i < iter_num; i++) {
DotSdd<DeviceContext, T>(dev_ctx, &query_lists[i], &key_lists[i],
&offset_lists[i], &columns_lists[i],
&result_sdd_lists[i], M, N, false, true);
if (key_padding_mask != nullptr && attn_mask != nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N,
key_padding_mask + (i / num_heads) * M, attn_mask);
} else if (key_padding_mask != nullptr && attn_mask == nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N,
key_padding_mask + (i / num_heads) * M, nullptr);
} else if (key_padding_mask == nullptr && attn_mask != nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N, nullptr, attn_mask);
} else {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N, nullptr, nullptr);
}
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&result_softmax_lists[i], &value_lists[i],
&output_lists[i], M, N, false, false);
}
}
};
template <typename DeviceContext, typename T>
class SparseAttentionGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto query = *ctx.Input<Tensor>("Q");
auto key = *ctx.Input<Tensor>("K");
auto value = *ctx.Input<Tensor>("V");
auto offset = *ctx.Input<Tensor>("Offset");
auto columns = *ctx.Input<Tensor>("Columns");
auto sparse_dot_sdd = *ctx.Input<Tensor>("SparseDotSdd");
auto softmax = *ctx.Input<Tensor>("Softmax");
auto dout = *ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dquery_ptr = ctx.Output<Tensor>(framework::GradVarName("Q"));
auto* dkey_ptr = ctx.Output<Tensor>(framework::GradVarName("K"));
auto* dvalue_ptr = ctx.Output<Tensor>(framework::GradVarName("V"));
dquery_ptr->mutable_data<T>(ctx.GetPlace());
dkey_ptr->mutable_data<T>(ctx.GetPlace());
dvalue_ptr->mutable_data<T>(ctx.GetPlace());
auto dquery = *dquery_ptr;
auto dkey = *dkey_ptr;
auto dvalue = *dvalue_ptr;
auto query_dims = query.dims();
int batch_size = query_dims[0];
int num_heads = query_dims[1];
int M = query_dims[2];
int N = query_dims[3];
std::vector<Tensor> query_lists = GetSplitTensor(&query);
std::vector<Tensor> key_lists = GetSplitTensor(&key);
std::vector<Tensor> value_lists = GetSplitTensor(&value);
std::vector<Tensor> offset_lists = GetSplitTensor(&offset);
std::vector<Tensor> columns_lists = GetSplitTensor(&columns);
std::vector<Tensor> sparse_dot_sdd_lists = GetSplitTensor(&sparse_dot_sdd);
std::vector<Tensor> softmax_lists = GetSplitTensor(&softmax);
std::vector<Tensor> dout_lists = GetSplitTensor(&dout);
std::vector<Tensor> dquery_lists = GetSplitTensor(&dquery);
std::vector<Tensor> dkey_lists = GetSplitTensor(&dkey);
std::vector<Tensor> dvalue_lists = GetSplitTensor(&dvalue);
const int iter_num = batch_size * num_heads;
const auto& dev_ctx = ctx.cuda_device_context();
for (int i = 0; i < iter_num; i++) {
// dValue = transpose(result_softmax) * dOut
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&softmax_lists[i], &dout_lists[i],
&dvalue_lists[i], M, N, true, false);
// dSoftmax = dOut * transpose(Value)
int nnz_num = columns.dims()[0];
Tensor dsoftmax;
dsoftmax.Resize({nnz_num});
dsoftmax.mutable_data<T>(ctx.GetPlace());
DotSdd<DeviceContext, T>(dev_ctx, &dout_lists[i], &value_lists[i],
&offset_lists[i], &columns_lists[i], &dsoftmax,
M, N, false, true);
// dSparseDotSdd = dSoftmax * softmax'(SparseDotSdd)
Tensor dsparse_dot_sdd;
dsparse_dot_sdd.Resize({nnz_num});
dsparse_dot_sdd.mutable_data<T>(ctx.GetPlace());
SparseSoftmaxBackward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &dsparse_dot_sdd,
&dsoftmax, &softmax_lists[i], 1, M, N);
// dQuery = dSparseDotSdd * Key
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&dsparse_dot_sdd, &key_lists[i],
&dquery_lists[i], M, N, false, false);
// dKey = transpose(dSparseDotSdd) * Query
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&dsparse_dot_sdd, &query_lists[i],
&dkey_lists[i], M, N, true, false);
}
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
sparse_attention,
ops::SparseAttentionCUDAKernel<plf::CUDADeviceContext, float>,
ops::SparseAttentionCUDAKernel<plf::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
sparse_attention_grad,
ops::SparseAttentionGradCUDAKernel<plf::CUDADeviceContext, float>,
ops::SparseAttentionGradCUDAKernel<plf::CUDADeviceContext, double>);
| 9efb8a8a4efa09a22d39e7a8a5797c93b28e65a1.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <math.h>
#include <limits>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/op_registry.h"
#if defined(PADDLE_WITH_CUDA)
#include "paddle/fluid/platform/dynload/cusparse.h"
#endif
namespace ops = paddle::operators;
namespace plf = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
__forceinline__ __device__ T CudaShuffleXorSync(unsigned mask, T val,
int width = warpSize) {
return __shfl_xor_sync(mask, val, width);
}
template <typename T, int batch_size, int warp_size>
__device__ __forceinline__ void WarpReduceSum(T* sum) {
#pragma unroll
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch_size; ++i) {
T sum_val = CudaShuffleXorSync(0xFFFFFFFF, sum[i], offset);
sum[i] = sum[i] + sum_val;
}
}
}
template <typename T, int batch_size, int warp_size>
__device__ __forceinline__ void WarpReduceMax(T* sum) {
#pragma unroll
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch_size; ++i) {
T max_val = CudaShuffleXorSync(0xFFFFFFFF, sum[i], offset);
sum[i] = max(sum[i], max_val);
}
}
}
template <typename T, int BlockSize, int BlockNnzMax>
__global__ void BlockSparseSoftmaxForward(T* softmax, const T* src, T scale,
const T* kp_mask, const T* attn_mask,
const int* layout_rowptr,
const int* layout_colindex,
int num_rows) {
// current thread related info
const int WarpSize = 32;
const int cur_row = blockIdx.x * blockDim.y + threadIdx.y;
if (cur_row < num_rows) {
const int cur_block_row = cur_row / BlockSize;
const int cur_block_nnz =
layout_rowptr[cur_block_row + 1] - layout_rowptr[cur_block_row];
T srcdata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize] = {0};
T attndata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize] = {0};
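    // Each lane caches up to (BlockSize * BlockNnzMax + 31) / 32 elements of
    // its row in registers.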
// read tensor data, attn mask
const int iter = (cur_block_nnz + WarpSize - 1) / WarpSize;
const T* srcptr = src + layout_rowptr[cur_block_row];
const T* attnptr = (attn_mask == nullptr)
? nullptr
: (attn_mask + cur_block_row * num_rows);
    // the column start index in the current row
const int* colindex = layout_colindex + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
// read kp mask
T cur_kp_mask;
if ((kp_mask != nullptr) &&
std::abs(kp_mask[colindex[cur_block_col]]) <
std::numeric_limits<T>::epsilon()) {
cur_kp_mask = -std::numeric_limits<T>::infinity();
} else {
cur_kp_mask = 0;
}
// do mask operation
if ((attnptr != nullptr) &&
std::abs(attnptr[colindex[cur_block_col]]) <
std::numeric_limits<T>::epsilon()) {
srcdata[cur_reg_index] =
-std::numeric_limits<T>::infinity() * scale + cur_kp_mask;
} else {
srcdata[cur_reg_index] = scale * srcptr[cur_block_col] + cur_kp_mask;
}
} else {
srcdata[cur_reg_index] = -std::numeric_limits<T>::infinity();
}
}
// max value
T max_value = srcdata[0];
const int kIteration =
(cur_block_nnz * BlockSize + WarpSize - 1) / WarpSize;
#pragma unroll
for (int it = 1; it < kIteration; ++it) {
max_value = (max_value > srcdata[it]) ? max_value : srcdata[it];
}
WarpReduceMax<T, 1, WarpSize>(&max_value);
// exp sum
T sum = 0;
#pragma unroll
for (int it = 0; it < kIteration; ++it) {
srcdata[it] = std::exp(srcdata[it] - max_value);
sum += srcdata[it];
}
WarpReduceSum<T, 1, WarpSize>(&sum);
// compute softmax and write out
T* softmaxptr = softmax + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
softmaxptr[cur_block_col] = srcdata[cur_reg_index] / sum;
}
}
}
}
template <typename T, int BlockSize, int BlockNnzMax>
__global__ void BlockSparseSoftmaxBackward(T* dst, const T* grad, const T* src,
T scale, const int* layout_rowptr,
const int* layout_colindex,
int num_rows) {
// current thread related info
const int WarpSize = 32;
const int cur_row = blockIdx.x * blockDim.y + threadIdx.y;
if (cur_row < num_rows) {
const int cur_block_row = cur_row / BlockSize;
const int cur_block_nnz =
layout_rowptr[cur_block_row + 1] - layout_rowptr[cur_block_row];
T srcdata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize];
T graddata[(BlockSize * BlockNnzMax + WarpSize - 1) / WarpSize];
// read tensor data, attn mask
const int iter = (cur_block_nnz + WarpSize - 1) / WarpSize;
const T* srcptr = src + layout_rowptr[cur_block_row];
const T* gradptr = grad + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
srcdata[cur_reg_index] = srcptr[cur_block_col];
graddata[cur_reg_index] = gradptr[cur_block_col];
} else {
srcdata[cur_reg_index] = 0;
graddata[cur_reg_index] = 0;
}
}
T sum = 0;
const int kIteration =
(cur_block_nnz * BlockSize + WarpSize - 1) / WarpSize;
#pragma unroll
for (int it = 0; it < kIteration; ++it) {
sum += srcdata[it] * graddata[it];
}
WarpReduceSum<T, 1, WarpSize>(&sum);
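    // sum now holds dot(y, dy) for the row; dx = scale * y * (dy - sum) is the
    // row-wise softmax backward.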
    // compute the softmax gradient and write out
T* dstptr = dst + layout_rowptr[cur_block_row];
for (int j = 0; j < iter; j++) {
int cur_block_col = j * WarpSize + threadIdx.x;
int cur_reg_index = j;
if (cur_block_col < cur_block_nnz) {
dstptr[cur_block_col] =
scale * srcdata[cur_reg_index] * (graddata[cur_reg_index] - sum);
}
}
}
}
using Tensor = framework::Tensor;
/*
input: sparse C in CSR format (num_rows,num_rows)
output: sparse C after softmax operation
*/
template <typename DeviceContext, typename T>
void SparseSoftmaxForward(const platform::CUDADeviceContext& ctx,
const Tensor* offset, const Tensor* columns,
Tensor* input, Tensor* output, const int blocksize,
const int num_rows, const int num_cols,
const Tensor* key_padding_mask,
const Tensor* attn_mask) {
const int* offset_data = offset->data<int>();
const int* columns_data = columns->data<int>();
T* input_data = input->data<T>();
T* output_data = output->data<T>();
// Add mask
const T* key_padding_mask_data =
(key_padding_mask != nullptr) ? key_padding_mask->data<T>() : nullptr;
const T* attn_mask_data =
(attn_mask != nullptr) ? attn_mask->data<T>() : nullptr;
const int block_size = 1;
dim3 blocks(32, 4, 1);
int grid = (num_rows * block_size + 3) / 4;
T scaling = static_cast<T>(1.0) / sqrt(static_cast<T>(num_cols));
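  // Dispatch on num_cols (the query head_dim) to the smallest power-of-two
  // BlockNnzMax template bound, up to 512.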
if (num_cols <= 4) {
BlockSparseSoftmaxForward<T, block_size, 4><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 4 && num_cols <= 8) {
BlockSparseSoftmaxForward<T, block_size, 8><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 8 && num_cols <= 16) {
BlockSparseSoftmaxForward<T, block_size, 16><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 16 && num_cols <= 32) {
BlockSparseSoftmaxForward<T, block_size, 32><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 32 && num_cols <= 64) {
BlockSparseSoftmaxForward<T, block_size, 64><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 64 && num_cols <= 128) {
BlockSparseSoftmaxForward<T, block_size, 128><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 128 && num_cols <= 256) {
BlockSparseSoftmaxForward<T, block_size, 256><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else if (num_cols > 256 && num_cols <= 512) {
BlockSparseSoftmaxForward<T, block_size, 512><<<grid, blocks>>>(
output_data, input_data, scaling, key_padding_mask_data, attn_mask_data,
offset_data, columns_data, num_rows);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The head_dim of query in sparse_attention op should less or equal "
"512"));
}
}
template <typename DeviceContext, typename T>
void SparseSoftmaxBackward(const platform::CUDADeviceContext& ctx,
const Tensor* offset, const Tensor* columns,
Tensor* dx, const Tensor* dout, const Tensor* out,
const int blocksize, const int num_rows,
const int num_cols) {
const int* offset_data = offset->data<int>();
const int* columns_data = columns->data<int>();
T* dx_data = dx->data<T>();
const T* dout_data = dout->data<T>();
const T* out_data = out->data<T>();
const int block_size = 1;
dim3 blocks(32, 4, 1);
int grid = (num_rows * block_size + 3) / 4;
T scaling = static_cast<T>(1.0) / sqrt(static_cast<T>(num_cols));
if (num_cols <= 4) {
BlockSparseSoftmaxBackward<T, block_size, 4><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 4 && num_cols <= 8) {
BlockSparseSoftmaxBackward<T, block_size, 8><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 8 && num_cols <= 16) {
BlockSparseSoftmaxBackward<T, block_size, 16><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 16 && num_cols <= 32) {
BlockSparseSoftmaxBackward<T, block_size, 32><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 32 && num_cols <= 64) {
BlockSparseSoftmaxBackward<T, block_size, 64><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 64 && num_cols <= 128) {
BlockSparseSoftmaxBackward<T, block_size, 128><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 128 && num_cols <= 256) {
BlockSparseSoftmaxBackward<T, block_size, 256><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else if (num_cols > 256 && num_cols <= 512) {
BlockSparseSoftmaxBackward<T, block_size, 512><<<grid, blocks>>>(
dx_data, dout_data, out_data, scaling, offset_data, columns_data,
num_rows);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The head_dim of query in sparse_attention op should less or equal "
"512"));
}
}
using VarType = framework::proto::VarType;
inline cudaDataType_t GetGpuType(const VarType::Type data_type) {
if (data_type == VarType::FP32) {
return CUDA_R_32F;
} else if (data_type == VarType::FP64) {
return CUDA_R_64F;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Not support tensor type in sparse_attention OP: %s",
framework::DataTypeToString(data_type)));
}
}
inline cusparseOperation_t GetTransposeOperation(const bool transpose) {
if (transpose) {
return CUSPARSE_OPERATION_TRANSPOSE;
} else {
return CUSPARSE_OPERATION_NON_TRANSPOSE;
}
}
void CusparseDestroy(cusparseDnMatDescr_t* dn_mat_first,
cusparseDnMatDescr_t* dn_mat_second,
cusparseSpMatDescr_t* sp_mat) {
platform::dynload::cusparseDestroyDnMat(*dn_mat_first);
platform::dynload::cusparseDestroyDnMat(*dn_mat_second);
platform::dynload::cusparseDestroySpMat(*sp_mat);
}
/*
input: dense A (num_rows,num_cols), dense B (num_rows,num_cols)
output: sparse C in CSR format (num_rows,num_rows)
*/
template <typename DeviceContext, typename T>
void DotSdd(const platform::CUDADeviceContext& ctx, const Tensor* a,
const Tensor* b, const Tensor* c_offset, const Tensor* c_columns,
Tensor* c_value, const int num_rows, const int num_cols,
const bool a_transpose, const bool b_transpose) {
const T* a_data = a->data<T>();
const T* b_data = b->data<T>();
const int* c_offset_data = c_offset->data<int>();
const int* c_columns_data = c_columns->data<int>();
T* c_value_data = c_value->data<T>();
cudaDataType_t gpu_type =
GetGpuType(framework::TransToProtoVarType(c_value->dtype()));
cusparseHandle_t handle = nullptr;
cusparseDnMatDescr_t mat_a, mat_b;
cusparseSpMatDescr_t mat_c;
platform::dynload::cusparseCreate(&handle);
// Create dense matrix A
platform::dynload::cusparseCreateDnMat(&mat_a, num_rows, num_cols, num_cols,
const_cast<T*>(a_data), gpu_type,
CUSPARSE_ORDER_ROW);
// Create dense matrix B
platform::dynload::cusparseCreateDnMat(&mat_b, num_rows, num_cols, num_cols,
const_cast<T*>(b_data), gpu_type,
CUSPARSE_ORDER_ROW);
// Create sparse matrix C in CSR format
int c_nnz = c_columns->dims()[1];
platform::dynload::cusparseCreateCsr(
&mat_c, num_rows, num_rows, c_nnz, const_cast<int*>(c_offset_data),
const_cast<int*>(c_columns_data), c_value_data, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, gpu_type);
T alpha = 1;
T beta = 0;
size_t buffer_size = 0;
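  // SDDMM computes C = alpha * op(A) * op(B) sampled at C's sparsity pattern
  // + beta * C; query the required workspace size first.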
platform::dynload::cusparseSDDMM_bufferSize(
handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha, mat_a, mat_b, &beta, mat_c,
gpu_type, CUSPARSE_SDDMM_ALG_DEFAULT, &buffer_size);
auto d_buffer_ptr = paddle::memory::Alloc(ctx, buffer_size);
void* d_buffer = static_cast<void*>(d_buffer_ptr->ptr());
platform::dynload::cusparseSDDMM(handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha,
mat_a, mat_b, &beta, mat_c, gpu_type,
CUSPARSE_SDDMM_ALG_DEFAULT, d_buffer);
CusparseDestroy(&mat_a, &mat_b, &mat_c);
platform::dynload::cusparseDestroy(handle);
}
/*
input: sparse A in CSR format (num_rows,num_rows), dense B (num_rows,num_cols)
output: dense C (num_rows,num_cols)
*/
template <typename DeviceContext, typename T>
void DotDsd(const platform::CUDADeviceContext& ctx, const Tensor* a_offset,
const Tensor* a_columns, const Tensor* a_value, const Tensor* b,
Tensor* c, const int num_rows, const int num_cols,
const bool a_transpose, const bool b_transpose) {
const int* a_offset_data = a_offset->data<int>();
const int* a_columns_data = a_columns->data<int>();
const T* a_value_data = a_value->data<T>();
const T* b_data = b->data<T>();
T* c_data = c->data<T>();
cudaDataType_t gpu_type =
GetGpuType(framework::TransToProtoVarType(c->dtype()));
cusparseHandle_t handle = nullptr;
cusparseSpMatDescr_t mat_a;
cusparseDnMatDescr_t mat_b, mat_c;
platform::dynload::cusparseCreate(&handle);
// Create sparse matrix A in CSR format
int a_nnz = a_columns->dims()[1];
platform::dynload::cusparseCreateCsr(
&mat_a, num_rows, num_rows, a_nnz, const_cast<int*>(a_offset_data),
const_cast<int*>(a_columns_data), const_cast<T*>(a_value_data),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO,
gpu_type);
// Create dense matrix B
platform::dynload::cusparseCreateDnMat(&mat_b, num_rows, num_cols, num_cols,
const_cast<T*>(b_data), gpu_type,
CUSPARSE_ORDER_ROW);
// Create dense matrix C
platform::dynload::cusparseCreateDnMat(&mat_c, num_rows, num_cols, num_cols,
c_data, gpu_type, CUSPARSE_ORDER_ROW);
T alpha = 1;
T beta = 0;
size_t buffer_size = 0;
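  // SpMM computes the dense result C = alpha * op(A_sparse) * op(B_dense) + beta * C.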
// allocate an external buffer if needed
platform::dynload::cusparseSpMM_bufferSize(
handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha, mat_a, mat_b, &beta, mat_c,
gpu_type, CUSPARSE_SPMM_ALG_DEFAULT, &buffer_size);
auto d_buffer_ptr = paddle::memory::Alloc(ctx, buffer_size);
void* d_buffer = static_cast<void*>(d_buffer_ptr->ptr());
platform::dynload::cusparseSpMM(handle, GetTransposeOperation(a_transpose),
GetTransposeOperation(b_transpose), &alpha,
mat_a, mat_b, &beta, mat_c, gpu_type,
CUSPARSE_SPMM_ALG_DEFAULT, d_buffer);
CusparseDestroy(&mat_b, &mat_c, &mat_a);
platform::dynload::cusparseDestroy(handle);
}
std::vector<Tensor> GetSplitTensor(Tensor* input) {
auto dims = input->dims();
int batch_size = dims[0];
int num_heads = dims[1];
std::vector<int> new_dims(dims.size() - 1);
new_dims[0] = batch_size * num_heads;
for (int i = 1; i < new_dims.size(); i++) {
new_dims[i] = dims[i + 1];
}
input->Resize(phi::make_ddim(new_dims));
return input->Split(1, 0);
}
template <typename DeviceContext, typename T>
class SparseAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto query = *ctx.Input<Tensor>("Q");
auto key = *ctx.Input<Tensor>("K");
auto value = *ctx.Input<Tensor>("V");
auto offset = *ctx.Input<Tensor>("Offset");
auto columns = *ctx.Input<Tensor>("Columns");
auto output_ptr = ctx.Output<Tensor>("Out");
output_ptr->mutable_data<T>(ctx.GetPlace());
auto sparse_dot_sdd_ptr = ctx.Output<Tensor>("SparseDotSdd");
sparse_dot_sdd_ptr->mutable_data<T>(ctx.GetPlace());
auto softmax_ptr = ctx.Output<Tensor>("Softmax");
softmax_ptr->mutable_data<T>(ctx.GetPlace());
// add Mask
auto* key_padding_mask = ctx.HasInput("KeyPaddingMask")
? ctx.Input<Tensor>("KeyPaddingMask")
: nullptr;
auto* attn_mask =
ctx.HasInput("AttnMask") ? ctx.Input<Tensor>("AttnMask") : nullptr;
auto output = *output_ptr;
auto result_sdd = *sparse_dot_sdd_ptr;
auto result_softmax = *softmax_ptr;
auto query_dims = query.dims();
int batch_size = query_dims[0];
int num_heads = query_dims[1];
int M = query_dims[2];
int N = query_dims[3];
std::vector<Tensor> query_lists = GetSplitTensor(&query);
std::vector<Tensor> key_lists = GetSplitTensor(&key);
std::vector<Tensor> value_lists = GetSplitTensor(&value);
std::vector<Tensor> offset_lists = GetSplitTensor(&offset);
std::vector<Tensor> columns_lists = GetSplitTensor(&columns);
std::vector<Tensor> result_sdd_lists = GetSplitTensor(&result_sdd);
std::vector<Tensor> result_softmax_lists = GetSplitTensor(&result_softmax);
std::vector<Tensor> output_lists = GetSplitTensor(&output);
const auto& dev_ctx = ctx.cuda_device_context();
const int iter_num = batch_size * num_heads;
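    // For each (batch, head) slice: (1) SDD: Q * K^T evaluated only at the
    // sparse layout, (2) sparse softmax with optional masks, (3) DSD:
    // softmax * V.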
for (int i = 0; i < iter_num; i++) {
DotSdd<DeviceContext, T>(dev_ctx, &query_lists[i], &key_lists[i],
&offset_lists[i], &columns_lists[i],
&result_sdd_lists[i], M, N, false, true);
if (key_padding_mask != nullptr && attn_mask != nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N,
key_padding_mask + (i / num_heads) * M, attn_mask);
} else if (key_padding_mask != nullptr && attn_mask == nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N,
key_padding_mask + (i / num_heads) * M, nullptr);
} else if (key_padding_mask == nullptr && attn_mask != nullptr) {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N, nullptr, attn_mask);
} else {
SparseSoftmaxForward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &result_sdd_lists[i],
&result_softmax_lists[i], 1, M, N, nullptr, nullptr);
}
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&result_softmax_lists[i], &value_lists[i],
&output_lists[i], M, N, false, false);
}
}
};
template <typename DeviceContext, typename T>
class SparseAttentionGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto query = *ctx.Input<Tensor>("Q");
auto key = *ctx.Input<Tensor>("K");
auto value = *ctx.Input<Tensor>("V");
auto offset = *ctx.Input<Tensor>("Offset");
auto columns = *ctx.Input<Tensor>("Columns");
auto sparse_dot_sdd = *ctx.Input<Tensor>("SparseDotSdd");
auto softmax = *ctx.Input<Tensor>("Softmax");
auto dout = *ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dquery_ptr = ctx.Output<Tensor>(framework::GradVarName("Q"));
auto* dkey_ptr = ctx.Output<Tensor>(framework::GradVarName("K"));
auto* dvalue_ptr = ctx.Output<Tensor>(framework::GradVarName("V"));
dquery_ptr->mutable_data<T>(ctx.GetPlace());
dkey_ptr->mutable_data<T>(ctx.GetPlace());
dvalue_ptr->mutable_data<T>(ctx.GetPlace());
auto dquery = *dquery_ptr;
auto dkey = *dkey_ptr;
auto dvalue = *dvalue_ptr;
auto query_dims = query.dims();
int batch_size = query_dims[0];
int num_heads = query_dims[1];
int M = query_dims[2];
int N = query_dims[3];
std::vector<Tensor> query_lists = GetSplitTensor(&query);
std::vector<Tensor> key_lists = GetSplitTensor(&key);
std::vector<Tensor> value_lists = GetSplitTensor(&value);
std::vector<Tensor> offset_lists = GetSplitTensor(&offset);
std::vector<Tensor> columns_lists = GetSplitTensor(&columns);
std::vector<Tensor> sparse_dot_sdd_lists = GetSplitTensor(&sparse_dot_sdd);
std::vector<Tensor> softmax_lists = GetSplitTensor(&softmax);
std::vector<Tensor> dout_lists = GetSplitTensor(&dout);
std::vector<Tensor> dquery_lists = GetSplitTensor(&dquery);
std::vector<Tensor> dkey_lists = GetSplitTensor(&dkey);
std::vector<Tensor> dvalue_lists = GetSplitTensor(&dvalue);
const int iter_num = batch_size * num_heads;
const auto& dev_ctx = ctx.cuda_device_context();
for (int i = 0; i < iter_num; i++) {
// dValue = transpose(result_softmax) * dOut
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&softmax_lists[i], &dout_lists[i],
&dvalue_lists[i], M, N, true, false);
// dSoftmax = dOut * transpose(Value)
int nnz_num = columns.dims()[0];
Tensor dsoftmax;
dsoftmax.Resize({nnz_num});
dsoftmax.mutable_data<T>(ctx.GetPlace());
DotSdd<DeviceContext, T>(dev_ctx, &dout_lists[i], &value_lists[i],
&offset_lists[i], &columns_lists[i], &dsoftmax,
M, N, false, true);
// dSparseDotSdd = dSoftmax * softmax'(SparseDotSdd)
Tensor dsparse_dot_sdd;
dsparse_dot_sdd.Resize({nnz_num});
dsparse_dot_sdd.mutable_data<T>(ctx.GetPlace());
SparseSoftmaxBackward<DeviceContext, T>(
dev_ctx, &offset_lists[i], &columns_lists[i], &dsparse_dot_sdd,
&dsoftmax, &softmax_lists[i], 1, M, N);
// dQuery = dSparseDotSdd * Key
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&dsparse_dot_sdd, &key_lists[i],
&dquery_lists[i], M, N, false, false);
// dKey = transpose(dSparseDotSdd) * Query
DotDsd<DeviceContext, T>(dev_ctx, &offset_lists[i], &columns_lists[i],
&dsparse_dot_sdd, &query_lists[i],
&dkey_lists[i], M, N, true, false);
}
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
sparse_attention,
ops::SparseAttentionCUDAKernel<plf::CUDADeviceContext, float>,
ops::SparseAttentionCUDAKernel<plf::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
sparse_attention_grad,
ops::SparseAttentionGradCUDAKernel<plf::CUDADeviceContext, float>,
ops::SparseAttentionGradCUDAKernel<plf::CUDADeviceContext, double>);
|
ffa55cf0540d3768bc626f5d101e082ca8614616.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda_drvapi.h>
#include <helper_functions.h>
#include <helper_math.h>
#include <helper_string.h>
#include <sstream>
#include <iostream>
#include <fstream>
// includes, kernels
#include "matrixmul_kernel.hip"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
bool passed = true;
for (unsigned int x = 0; x < P.height; x ++)
{
for (unsigned int y = 0; y < P.width; y ++)
{
passed &= (fabs(P.elements[x * P.width + y] - reference.elements[x * P.width + y]) < 0.0001f);
}
}
printf("Test %s\n", (passed) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
Matrix device_M = AllocateDeviceMatrix(M);
Matrix device_N = AllocateDeviceMatrix(N);
Matrix device_P = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(device_M, M);
CopyToDeviceMatrix(device_N, N);
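    // One thread per output element: 16x16 thread blocks tiled over the matrix
    // (assumes the matrix dimensions are multiples of 16).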
dim3 dim_block, dim_grid;
dim_block.x = dim_block.y = 16; dim_block.z = 1;
dim_grid.x = device_M.width / dim_block.x;
dim_grid.y = device_M.height / dim_block.y;
dim_grid.z = 1;
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dim_grid),dim3(dim_block), 0, 0, device_M, device_N, device_P);
CopyFromDeviceMatrix(P, device_P);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Read a MATRIX_SIZE x MATRIX_SIZE floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
std::ifstream source(file_name, std::ios_base::in);
for (unsigned int x = 0; x < data_read; x++)
{
source >> M->elements[x];
}
return data_read;
}
// Write a MATRIX_SIZE x MATRIX_SIZE floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int data_write = MATRIX_SIZE*MATRIX_SIZE;
std::ofstream dest(file_name, std::ios_base::out);
for (unsigned int x = 0; x < data_write; x++)
{
		dest << M.elements[x] << " ";
}
}
| ffa55cf0540d3768bc626f5d101e082ca8614616.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda_drvapi.h>
#include <helper_functions.h>
#include <helper_math.h>
#include <helper_string.h>
#include <sstream>
#include <iostream>
#include <fstream>
// includes, kernels
#include "matrixmul_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
bool passed = true;
for (unsigned int x = 0; x < P.height; x ++)
{
for (unsigned int y = 0; y < P.width; y ++)
{
passed &= (fabs(P.elements[x * P.width + y] - reference.elements[x * P.width + y]) < 0.0001f);
}
}
printf("Test %s\n", (passed) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
Matrix device_M = AllocateDeviceMatrix(M);
Matrix device_N = AllocateDeviceMatrix(N);
Matrix device_P = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(device_M, M);
CopyToDeviceMatrix(device_N, N);
dim3 dim_block, dim_grid;
dim_block.x = dim_block.y = 16; dim_block.z = 1;
dim_grid.x = device_M.width / dim_block.x;
dim_grid.y = device_M.height / dim_block.y;
dim_grid.z = 1;
MatrixMulKernel<<<dim_grid,dim_block>>>(device_M, device_N, device_P);
CopyFromDeviceMatrix(P, device_P);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Read a MATRIX_SIZE x MATRIX_SIZE floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
std::ifstream source(file_name, std::ios_base::in);
for (unsigned int x = 0; x < data_read; x++)
{
source >> M->elements[x];
}
return data_read;
}
// Write a MATRIX_SIZE x MATRIX_SIZE floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int data_write = MATRIX_SIZE*MATRIX_SIZE;
std::ofstream dest(file_name, std::ios_base::out);
for (unsigned int x = 0; x < data_write; x++)
{
		dest << M.elements[x] << " ";
}
}
|
a154135224d77473472b2c57e7c6207bee561f4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/nan_inf_utils_detail.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
PHI_DECLARE_int32(check_nan_inf_level);
namespace paddle {
namespace framework {
namespace details {
static std::once_flag init_multi_gpu_op_var_map_flag;
// lazy init
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>&
multi_op_var2gpu_str() {
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>
_multi_op_var2gpu_str;
return _multi_op_var2gpu_str;
}
static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() {
static std::vector<std::mutex> _multi_op_var2gpu_str_mutex;
return _multi_op_var2gpu_str_mutex;
}
static void InitMultiGPUOpVarMap() {
int dev_count = platform::GetGPUDeviceCount();
PADDLE_ENFORCE_GT(dev_count,
0,
platform::errors::NotFound(
"cuda device must > 0, now dev_count=%d", dev_count));
// https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex
std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi(
dev_count);
std::vector<std::mutex> tmp_multi_mutex(dev_count);
multi_op_var2gpu_str().swap(tmp_multi);
multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex);
}
template <typename T>
__device__ __forceinline__ void PrintNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int nan_count, inf_count, num_count;
if (threadIdx.x == 0) nan_count = inf_count = num_count = 0;
  __syncthreads();
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
unsigned int count = 0;
if (isnan(value[i])) {
count = atomicAdd(&nan_count, 1);
} else if (isinf(value[i])) {
count = atomicAdd(&inf_count, 1);
} else {
count = atomicAdd(&num_count, 1);
}
// for cuda, print in every block
if (count < print_num) {
printf("numel:%lu idx:%lu value:%f\n",
static_cast<uint64_t>(numel),
static_cast<uint64_t>(i),
static_cast<float>(value[i]));
}
}
  __syncthreads();
#ifdef __HIPCC__
if (true && hipThreadIdx_x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n",
hipBlockIdx_x,
nan_count,
inf_count,
num_count);
#else
if (true && threadIdx.x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n",
blockIdx.x,
nan_count,
inf_count,
num_count);
#endif
PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info);
}
}
// Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s
template <typename T>
__global__ void CheckNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
  /// step 1, judge whether the values contain nan or inf
__shared__ volatile int has_nan_inf;
if (threadIdx.x == 0) has_nan_inf = false;
__syncthreads();
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
T sum = static_cast<T>(0.0);
// Todo(wangxi). simd speed up
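  // value - value is 0 for finite inputs but NaN for NaN/Inf, so the reduced
  // sum is NaN iff this thread's slice contains a NaN or Inf.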
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
sum += (value[i] - value[i]);
}
if (isnan(sum) || isinf(sum)) has_nan_inf = true;
__syncthreads();
/// Note. different blocks may behave differently
if (!has_nan_inf) return;
PrintNanInfKernel(value, numel, print_num, debug_info);
}
template <typename T, int ReduceType>
__device__ T BlockReduce(T value) {
__shared__ T shared_mem[1024];
shared_mem[threadIdx.x] = value;
__syncthreads();
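  // Power-of-two tree reduction in shared memory (assumes blockDim.x <= 1024);
  // strides <= 16 skip __syncthreads and rely on warp-synchronous execution.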
for (int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if (threadIdx.x < stride) {
T value0 = shared_mem[threadIdx.x];
T value1 = shared_mem[threadIdx.x + stride];
T reduce_value;
if (ReduceType == 0) {
// max
reduce_value = value0 > value1 ? value0 : value1;
} else if (ReduceType == 1) {
// min
reduce_value = value0 < value1 ? value0 : value1;
} else if (ReduceType == 2) {
// sum
reduce_value = value0 + value1;
}
shared_mem[threadIdx.x] = reduce_value;
}
if (stride > 16) {
__syncthreads();
}
}
__syncthreads();
return shared_mem[0];
}
__device__ void BlockReduceNumNanInfAndWrite(const int64_t num_nan,
const int64_t num_inf,
const int64_t num_zero,
int64_t offset,
int64_t* num_nan_ptr,
int64_t* num_inf_ptr,
int64_t* num_zero_ptr) {
int64_t block_num_nan = BlockReduce<int64_t, 2>(num_nan);
int64_t block_num_inf = BlockReduce<int64_t, 2>(num_inf);
int64_t block_num_zero = BlockReduce<int64_t, 2>(num_zero);
if (threadIdx.x == 0) {
num_nan_ptr[offset] = block_num_nan;
num_inf_ptr[offset] = block_num_inf;
num_zero_ptr[offset] = block_num_zero;
}
}
template <
typename T,
std::enable_if_t<std::is_same<T, phi::dtype::complex<float>>::value ||
std::is_same<T, phi::dtype::complex<double>>::value,
bool> = true>
__device__ void BlockReduceMaxMinAndWrite(const T max_value,
const T min_value,
const T mean_value,
int64_t offset,
T* max_ptr,
T* min_ptr,
T* mean_ptr) {
// TODO(Xreki): support complex
}
template <
typename T,
std::enable_if_t<!std::is_same<T, phi::dtype::complex<float>>::value &&
!std::is_same<T, phi::dtype::complex<double>>::value,
bool> = true>
__device__ void BlockReduceMaxMinAndWrite(const T max_value,
const T min_value,
const T mean_value,
int64_t offset,
T* max_ptr,
T* min_ptr,
T* mean_ptr) {
if (max_ptr && min_ptr && mean_ptr) {
__syncthreads();
T block_max_value = phi::funcs::BlockReduceMax<T>(max_value, FINAL_MASK);
T block_min_value = phi::funcs::BlockReduceMin<T>(min_value, FINAL_MASK);
T block_mean_value = phi::funcs::BlockReduceSum<T>(mean_value, FINAL_MASK);
if (threadIdx.x == 0) {
max_ptr[offset] = block_max_value;
min_ptr[offset] = block_min_value;
mean_ptr[offset] = block_mean_value;
}
}
}
template <typename T, typename MT>
__global__ void FindNanInfAndBlockMaxMin(const T* value_ptr,
const int64_t numel,
int64_t* block_num_nan_ptr,
int64_t* block_num_inf_ptr,
int64_t* block_num_zero_ptr,
MT* tensor_block_max_ptr,
MT* tensor_block_min_ptr,
MT* tensor_block_mean_ptr) {
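  // Each block accumulates its own nan/inf/zero counts and max/min/mean, then
  // writes one entry per block for FindGlobalMaxMinAndPrint to reduce.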
int64_t i = threadIdx.x + blockIdx.x * blockDim.x;
int64_t num_nan = 0;
int64_t num_inf = 0;
int64_t num_zero = 0;
MT max_value = static_cast<MT>(i < numel ? value_ptr[i] : value_ptr[0]);
MT min_value = static_cast<MT>(i < numel ? value_ptr[i] : value_ptr[0]);
MT mean_value = static_cast<MT>(0);
for (; i < numel; i += blockDim.x * gridDim.x) {
MT value = static_cast<MT>(value_ptr[i]);
max_value = value > max_value ? value : max_value;
min_value = value < min_value ? value : min_value;
mean_value += value / static_cast<MT>(numel);
if (isnan(value)) {
num_nan += 1;
} else if (isinf(value)) {
num_inf += 1;
}
if (value == static_cast<MT>(0)) {
num_zero += 1;
}
}
BlockReduceNumNanInfAndWrite(num_nan,
num_inf,
num_zero,
blockIdx.x,
block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr);
BlockReduceMaxMinAndWrite<MT>(max_value,
min_value,
mean_value,
blockIdx.x,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr);
}
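// Pass 2: a single thread folds the per-block partials into global counts and
// max/min/mean, optionally exports the counts via nan_inf_zero, and prints the
// summary for the requested check level.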
template <typename T, typename MT>
__global__ void FindGlobalMaxMinAndPrint(const int64_t* block_num_nan_ptr,
const int64_t* block_num_inf_ptr,
const int64_t* block_num_zero_ptr,
const MT* tensor_block_max_ptr,
const MT* tensor_block_min_ptr,
const MT* tensor_block_mean_ptr,
const char* debug_info,
int64_t numel,
int64_t numel_max_min,
int check_nan_inf_level,
int64_t* nan_inf_zero) {
if (blockIdx.x == 0 && threadIdx.x == 0) {
int64_t num_nan = 0;
int64_t num_inf = 0;
int64_t num_zero = 0;
// numel_max_min <= 128
for (int64_t i = 0; i < numel_max_min; ++i) {
num_nan += block_num_nan_ptr[i];
num_inf += block_num_inf_ptr[i];
num_zero += block_num_zero_ptr[i];
}
MT max_value = static_cast<MT>(0);
MT min_value = static_cast<MT>(0);
MT mean_value = static_cast<MT>(0);
if (tensor_block_max_ptr && tensor_block_min_ptr && tensor_block_mean_ptr) {
max_value = tensor_block_max_ptr[0];
min_value = tensor_block_min_ptr[0];
mean_value = tensor_block_mean_ptr[0];
// numel_max_min <= 128
for (int64_t i = 1; i < numel_max_min; ++i) {
MT tmp_max_value = tensor_block_max_ptr[i];
MT tmp_min_value = tensor_block_min_ptr[i];
MT tmp_mean_value = tensor_block_mean_ptr[i];
max_value = tmp_max_value > max_value ? tmp_max_value : max_value;
min_value = tmp_min_value < min_value ? tmp_min_value : min_value;
mean_value += tmp_mean_value;
}
if (check_nan_inf_level == 0) {
nan_inf_zero[0] = num_nan;
nan_inf_zero[1] = num_inf;
nan_inf_zero[2] = num_zero;
}
}
PrintForDifferentLevel<T, MT>(debug_info,
numel,
num_nan,
num_inf,
num_zero,
max_value,
min_value,
mean_value,
check_nan_inf_level);
}
}
template <typename T>
inline std::string GetHintString(const std::string& op_type,
const std::string& var_name,
const phi::Place& place,
int dev_id = -1) {
std::string op_var = GetCpuHintString<T>(op_type, var_name, place, dev_id);
PADDLE_ENFORCE_EQ(
(dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()),
true,
platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
multi_op_var2gpu_str_mutex().size()));
return op_var;
}
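// Returns a device-resident copy of the op/var hint string. Strings are cached
// in a per-GPU map guarded by a per-device mutex, so the host-to-device copy
// happens only the first time a given op/var pair is seen.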
template <typename T>
static char* GetGpuHintStringPtr(const phi::GPUContext& ctx,
const std::string& op_type,
const std::string& var_name,
int dev_id) {
std::string op_var =
GetHintString<T>(op_type, var_name, ctx.GetPlace(), dev_id);
char* gpu_str_ptr = nullptr;
{
auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id);
auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id);
std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex);
if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert
auto gpu_str_tensor = paddle::memory::Alloc(
ctx.GetPlace(),
op_var.length() + 1,
phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream())));
gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr());
op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor));
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
platform::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var2gpu_str, but now failed",
op_var));
#ifdef __HIPCC__
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr,
iter->first.c_str(),
op_var.length() + 1,
hipMemcpyHostToDevice,
ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr,
iter->first.c_str(),
op_var.length() + 1,
hipMemcpyHostToDevice,
ctx.stream()));
#endif
} else { // get
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
platform::errors::PreconditionNotMet(
"op_var=%s should be in the op_var2gpu_str, but "
"now can't find it",
op_var));
gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr());
}
}
return gpu_str_ptr;
}
template <>
template <typename T>
void TensorCheckerVisitor<phi::GPUContext>::apply(
typename std::enable_if<
std::is_floating_point<T>::value ||
std::is_same<T, ::paddle::platform::complex<float>>::value ||
std::is_same<T, ::paddle::platform::complex<double>>::value>::type*)
const {
auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(tensor.place()));
int dev_id = tensor.place().device;
// Write log to file
auto file_path = GetNanPath();
if (file_path.size() > 0) {
phi::DenseTensor cpu_tensor;
platform::CPUPlace cpu_place;
cpu_tensor.Resize(tensor.dims());
// 1. copy from gpu to cpu
paddle::framework::TensorCopySync(tensor, cpu_place, &cpu_tensor);
auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(tensor.place()));
const std::string debug_info =
GetHintString<T>(op_type, var_name, place, dev_id);
// 2. write log to file
CheckNanInfCpuImpl(cpu_tensor.data<T>(), tensor.numel(), debug_info, "gpu");
return;
}
// Write log to window
char* gpu_str_ptr =
GetGpuHintStringPtr<T>(*dev_ctx, op_type, var_name, dev_id);
#ifdef __HIPCC__
// HIP will throw GPU memory access fault if threads > 256
const size_t threads = 256;
#else
const size_t threads = 1024;
#endif
size_t blocks =
::min(static_cast<size_t>(128),
static_cast<size_t>((tensor.numel() + threads - 1) / threads));
#ifdef __HIPCC__
int print_num = 3;
hipLaunchKernelGGL(CheckNanInfKernel,
dim3(blocks),
dim3(threads),
0,
dev_ctx->stream(),
tensor.data<T>(),
tensor.numel(),
print_num,
gpu_str_ptr);
#else
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
int64_t numel_max_min = blocks;
phi::DenseTensor block_num_nan_inf_zero;
block_num_nan_inf_zero.Resize({static_cast<int64_t>(3 * numel_max_min)});
int64_t* block_num_nan_ptr =
dev_ctx->template Alloc<int64_t>(&block_num_nan_inf_zero);
int64_t* block_num_inf_ptr = block_num_nan_ptr + numel_max_min;
int64_t* block_num_zero_ptr = block_num_inf_ptr + numel_max_min;
phi::DenseTensor tensor_block_max_min;
tensor_block_max_min.Resize({static_cast<int64_t>(3 * numel_max_min)});
MT* tensor_block_max_ptr = dev_ctx->template Alloc<MT>(&tensor_block_max_min);
MT* tensor_block_min_ptr = tensor_block_max_ptr + numel_max_min;
MT* tensor_block_mean_ptr = tensor_block_max_ptr + 2 * numel_max_min;
hipLaunchKernelGGL(( FindNanInfAndBlockMaxMin<T, MT>)
, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor.data<T>(),
tensor.numel(),
block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr);
int check_nan_inf_level = FLAGS_check_nan_inf_level;
phi::DenseTensor nan_inf_zero_tensor;
nan_inf_zero_tensor.Resize({static_cast<int64_t>(3)});
int64_t* nan_inf_zero =
dev_ctx->template Alloc<int64_t>(&nan_inf_zero_tensor);
hipLaunchKernelGGL(( FindGlobalMaxMinAndPrint<T, MT>)
, dim3(1), dim3(1), 0, dev_ctx->stream(), block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr,
gpu_str_ptr,
tensor.numel(),
numel_max_min,
check_nan_inf_level,
nan_inf_zero_tensor.data<int64_t>());
if (check_nan_inf_level == 0 && GetNanInfStackLimit() > 0) {
auto nan_cpu =
phi::memory_utils::Alloc(phi::CPUPlace(), sizeof(int64_t) * 3);
int64_t* nan_cpu_ptr = reinterpret_cast<int64_t*>(nan_cpu->ptr());
phi::memory_utils::Copy(phi::CPUPlace(),
nan_cpu_ptr,
place,
nan_inf_zero,
3 * sizeof(int64_t),
dev_ctx->stream());
dev_ctx->Wait();
if (nan_cpu_ptr[0] > 0 || nan_cpu_ptr[1] > 0) {
const std::string debug_info =
GetHintString<T>(op_type, var_name, place, dev_id);
PADDLE_THROW(platform::errors::PreconditionNotMet(
"There are NAN or INF (num_nan=%lld, num_inf=%lld, num_zero=%lld) in "
"%s.",
static_cast<long long>(nan_cpu_ptr[0]), // NOLINT
static_cast<long long>(nan_cpu_ptr[1]), // NOLINT
static_cast<long long>(nan_cpu_ptr[2]), // NOLINT
debug_info));
}
}
#endif
}
template <>
void tensor_check<phi::GPUContext>(const std::string& op_type,
const std::string& var_name,
const phi::DenseTensor& tensor,
const platform::Place& place) {
std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap);
TensorCheckerVisitor<phi::GPUContext> vistor(
op_type, var_name, tensor, place);
VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor);
}
} // namespace details
} // namespace framework
} // namespace paddle
| a154135224d77473472b2c57e7c6207bee561f4f.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/nan_inf_utils_detail.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
PHI_DECLARE_int32(check_nan_inf_level);
namespace paddle {
namespace framework {
namespace details {
static std::once_flag init_multi_gpu_op_var_map_flag;
// lazy init
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>&
multi_op_var2gpu_str() {
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>
_multi_op_var2gpu_str;
return _multi_op_var2gpu_str;
}
static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() {
static std::vector<std::mutex> _multi_op_var2gpu_str_mutex;
return _multi_op_var2gpu_str_mutex;
}
static void InitMultiGPUOpVarMap() {
int dev_count = platform::GetGPUDeviceCount();
PADDLE_ENFORCE_GT(dev_count,
0,
platform::errors::NotFound(
"cuda device must > 0, now dev_count=%d", dev_count));
// https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex
std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi(
dev_count);
std::vector<std::mutex> tmp_multi_mutex(dev_count);
multi_op_var2gpu_str().swap(tmp_multi);
multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex);
}
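// Prints the first `print_num` values of each category (nan / inf / normal)
// per block, tallies the counts with shared-memory atomics, and finally aborts
// through PADDLE_ENFORCE with the per-tensor debug string.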
template <typename T>
__device__ __forceinline__ void PrintNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int nan_count, inf_count, num_count;
if (threadIdx.x == 0) nan_count = inf_count = num_count = 0;
  __syncthreads();
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
unsigned int count = 0;
if (isnan(value[i])) {
count = atomicAdd(&nan_count, 1);
} else if (isinf(value[i])) {
count = atomicAdd(&inf_count, 1);
} else {
count = atomicAdd(&num_count, 1);
}
// for cuda, print in every block
if (count < print_num) {
printf("numel:%lu idx:%lu value:%f\n",
static_cast<uint64_t>(numel),
static_cast<uint64_t>(i),
static_cast<float>(value[i]));
}
}
  __syncthreads();
#ifdef __HIPCC__
if (true && hipThreadIdx_x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n",
hipBlockIdx_x,
nan_count,
inf_count,
num_count);
#else
if (true && threadIdx.x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n",
blockIdx.x,
nan_count,
inf_count,
num_count);
#endif
PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info);
}
}
// Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s
template <typename T>
__global__ void CheckNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
  /// step 1, check whether there is any nan or inf
__shared__ volatile int has_nan_inf;
if (threadIdx.x == 0) has_nan_inf = false;
__syncthreads();
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
T sum = static_cast<T>(0.0);
// Todo(wangxi). simd speed up
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
sum += (value[i] - value[i]);
}
if (isnan(sum) || isinf(sum)) has_nan_inf = true;
__syncthreads();
/// Note. different blocks may behave differently
if (!has_nan_inf) return;
PrintNanInfKernel(value, numel, print_num, debug_info);
}
template <typename T, int ReduceType>
__device__ T BlockReduce(T value) {
__shared__ T shared_mem[1024];
shared_mem[threadIdx.x] = value;
__syncthreads();
for (int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if (threadIdx.x < stride) {
T value0 = shared_mem[threadIdx.x];
T value1 = shared_mem[threadIdx.x + stride];
T reduce_value;
if (ReduceType == 0) {
// max
reduce_value = value0 > value1 ? value0 : value1;
} else if (ReduceType == 1) {
// min
reduce_value = value0 < value1 ? value0 : value1;
} else if (ReduceType == 2) {
// sum
reduce_value = value0 + value1;
}
shared_mem[threadIdx.x] = reduce_value;
}
if (stride > 16) {
__syncthreads();
}
}
__syncthreads();
return shared_mem[0];
}
__device__ void BlockReduceNumNanInfAndWrite(const int64_t num_nan,
const int64_t num_inf,
const int64_t num_zero,
int64_t offset,
int64_t* num_nan_ptr,
int64_t* num_inf_ptr,
int64_t* num_zero_ptr) {
int64_t block_num_nan = BlockReduce<int64_t, 2>(num_nan);
int64_t block_num_inf = BlockReduce<int64_t, 2>(num_inf);
int64_t block_num_zero = BlockReduce<int64_t, 2>(num_zero);
if (threadIdx.x == 0) {
num_nan_ptr[offset] = block_num_nan;
num_inf_ptr[offset] = block_num_inf;
num_zero_ptr[offset] = block_num_zero;
}
}
template <
typename T,
std::enable_if_t<std::is_same<T, phi::dtype::complex<float>>::value ||
std::is_same<T, phi::dtype::complex<double>>::value,
bool> = true>
__device__ void BlockReduceMaxMinAndWrite(const T max_value,
const T min_value,
const T mean_value,
int64_t offset,
T* max_ptr,
T* min_ptr,
T* mean_ptr) {
// TODO(Xreki): support complex
}
template <
typename T,
std::enable_if_t<!std::is_same<T, phi::dtype::complex<float>>::value &&
!std::is_same<T, phi::dtype::complex<double>>::value,
bool> = true>
__device__ void BlockReduceMaxMinAndWrite(const T max_value,
const T min_value,
const T mean_value,
int64_t offset,
T* max_ptr,
T* min_ptr,
T* mean_ptr) {
if (max_ptr && min_ptr && mean_ptr) {
__syncthreads();
T block_max_value = phi::funcs::BlockReduceMax<T>(max_value, FINAL_MASK);
T block_min_value = phi::funcs::BlockReduceMin<T>(min_value, FINAL_MASK);
T block_mean_value = phi::funcs::BlockReduceSum<T>(mean_value, FINAL_MASK);
if (threadIdx.x == 0) {
max_ptr[offset] = block_max_value;
min_ptr[offset] = block_min_value;
mean_ptr[offset] = block_mean_value;
}
}
}
template <typename T, typename MT>
__global__ void FindNanInfAndBlockMaxMin(const T* value_ptr,
const int64_t numel,
int64_t* block_num_nan_ptr,
int64_t* block_num_inf_ptr,
int64_t* block_num_zero_ptr,
MT* tensor_block_max_ptr,
MT* tensor_block_min_ptr,
MT* tensor_block_mean_ptr) {
int64_t i = threadIdx.x + blockIdx.x * blockDim.x;
int64_t num_nan = 0;
int64_t num_inf = 0;
int64_t num_zero = 0;
MT max_value = static_cast<MT>(i < numel ? value_ptr[i] : value_ptr[0]);
MT min_value = static_cast<MT>(i < numel ? value_ptr[i] : value_ptr[0]);
MT mean_value = static_cast<MT>(0);
for (; i < numel; i += blockDim.x * gridDim.x) {
MT value = static_cast<MT>(value_ptr[i]);
max_value = value > max_value ? value : max_value;
min_value = value < min_value ? value : min_value;
mean_value += value / static_cast<MT>(numel);
if (isnan(value)) {
num_nan += 1;
} else if (isinf(value)) {
num_inf += 1;
}
if (value == static_cast<MT>(0)) {
num_zero += 1;
}
}
BlockReduceNumNanInfAndWrite(num_nan,
num_inf,
num_zero,
blockIdx.x,
block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr);
BlockReduceMaxMinAndWrite<MT>(max_value,
min_value,
mean_value,
blockIdx.x,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr);
}
template <typename T, typename MT>
__global__ void FindGlobalMaxMinAndPrint(const int64_t* block_num_nan_ptr,
const int64_t* block_num_inf_ptr,
const int64_t* block_num_zero_ptr,
const MT* tensor_block_max_ptr,
const MT* tensor_block_min_ptr,
const MT* tensor_block_mean_ptr,
const char* debug_info,
int64_t numel,
int64_t numel_max_min,
int check_nan_inf_level,
int64_t* nan_inf_zero) {
if (blockIdx.x == 0 && threadIdx.x == 0) {
int64_t num_nan = 0;
int64_t num_inf = 0;
int64_t num_zero = 0;
// numel_max_min <= 128
for (int64_t i = 0; i < numel_max_min; ++i) {
num_nan += block_num_nan_ptr[i];
num_inf += block_num_inf_ptr[i];
num_zero += block_num_zero_ptr[i];
}
MT max_value = static_cast<MT>(0);
MT min_value = static_cast<MT>(0);
MT mean_value = static_cast<MT>(0);
if (tensor_block_max_ptr && tensor_block_min_ptr && tensor_block_mean_ptr) {
max_value = tensor_block_max_ptr[0];
min_value = tensor_block_min_ptr[0];
mean_value = tensor_block_mean_ptr[0];
// numel_max_min <= 128
for (int64_t i = 1; i < numel_max_min; ++i) {
MT tmp_max_value = tensor_block_max_ptr[i];
MT tmp_min_value = tensor_block_min_ptr[i];
MT tmp_mean_value = tensor_block_mean_ptr[i];
max_value = tmp_max_value > max_value ? tmp_max_value : max_value;
min_value = tmp_min_value < min_value ? tmp_min_value : min_value;
mean_value += tmp_mean_value;
}
if (check_nan_inf_level == 0) {
nan_inf_zero[0] = num_nan;
nan_inf_zero[1] = num_inf;
nan_inf_zero[2] = num_zero;
}
}
PrintForDifferentLevel<T, MT>(debug_info,
numel,
num_nan,
num_inf,
num_zero,
max_value,
min_value,
mean_value,
check_nan_inf_level);
}
}
template <typename T>
inline std::string GetHintString(const std::string& op_type,
const std::string& var_name,
const phi::Place& place,
int dev_id = -1) {
std::string op_var = GetCpuHintString<T>(op_type, var_name, place, dev_id);
PADDLE_ENFORCE_EQ(
(dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()),
true,
platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
multi_op_var2gpu_str_mutex().size()));
return op_var;
}
template <typename T>
static char* GetGpuHintStringPtr(const phi::GPUContext& ctx,
const std::string& op_type,
const std::string& var_name,
int dev_id) {
std::string op_var =
GetHintString<T>(op_type, var_name, ctx.GetPlace(), dev_id);
char* gpu_str_ptr = nullptr;
{
auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id);
auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id);
std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex);
if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert
auto gpu_str_tensor = paddle::memory::Alloc(
ctx.GetPlace(),
op_var.length() + 1,
phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream())));
gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr());
op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor));
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
platform::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var2gpu_str, but now failed",
op_var));
#ifdef __HIPCC__
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr,
iter->first.c_str(),
op_var.length() + 1,
hipMemcpyHostToDevice,
ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(gpu_str_ptr,
iter->first.c_str(),
op_var.length() + 1,
cudaMemcpyHostToDevice,
ctx.stream()));
#endif
} else { // get
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
platform::errors::PreconditionNotMet(
"op_var=%s should be in the op_var2gpu_str, but "
"now can't find it",
op_var));
gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr());
}
}
return gpu_str_ptr;
}
template <>
template <typename T>
void TensorCheckerVisitor<phi::GPUContext>::apply(
typename std::enable_if<
std::is_floating_point<T>::value ||
std::is_same<T, ::paddle::platform::complex<float>>::value ||
std::is_same<T, ::paddle::platform::complex<double>>::value>::type*)
const {
auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(tensor.place()));
int dev_id = tensor.place().device;
// Write log to file
auto file_path = GetNanPath();
if (file_path.size() > 0) {
phi::DenseTensor cpu_tensor;
platform::CPUPlace cpu_place;
cpu_tensor.Resize(tensor.dims());
// 1. copy from gpu to cpu
paddle::framework::TensorCopySync(tensor, cpu_place, &cpu_tensor);
auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(tensor.place()));
const std::string debug_info =
GetHintString<T>(op_type, var_name, place, dev_id);
// 2. write log to file
CheckNanInfCpuImpl(cpu_tensor.data<T>(), tensor.numel(), debug_info, "gpu");
return;
}
// Write log to window
char* gpu_str_ptr =
GetGpuHintStringPtr<T>(*dev_ctx, op_type, var_name, dev_id);
#ifdef __HIPCC__
// HIP will throw GPU memory access fault if threads > 256
const size_t threads = 256;
#else
const size_t threads = 1024;
#endif
size_t blocks =
std::min(static_cast<size_t>(128),
static_cast<size_t>((tensor.numel() + threads - 1) / threads));
#ifdef __HIPCC__
int print_num = 3;
hipLaunchKernelGGL(CheckNanInfKernel,
dim3(blocks),
dim3(threads),
0,
dev_ctx->stream(),
tensor.data<T>(),
tensor.numel(),
print_num,
gpu_str_ptr);
#else
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
int64_t numel_max_min = blocks;
phi::DenseTensor block_num_nan_inf_zero;
block_num_nan_inf_zero.Resize({static_cast<int64_t>(3 * numel_max_min)});
int64_t* block_num_nan_ptr =
dev_ctx->template Alloc<int64_t>(&block_num_nan_inf_zero);
int64_t* block_num_inf_ptr = block_num_nan_ptr + numel_max_min;
int64_t* block_num_zero_ptr = block_num_inf_ptr + numel_max_min;
phi::DenseTensor tensor_block_max_min;
tensor_block_max_min.Resize({static_cast<int64_t>(3 * numel_max_min)});
MT* tensor_block_max_ptr = dev_ctx->template Alloc<MT>(&tensor_block_max_min);
MT* tensor_block_min_ptr = tensor_block_max_ptr + numel_max_min;
MT* tensor_block_mean_ptr = tensor_block_max_ptr + 2 * numel_max_min;
FindNanInfAndBlockMaxMin<T, MT>
<<<blocks, threads, 0, dev_ctx->stream()>>>(tensor.data<T>(),
tensor.numel(),
block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr);
int check_nan_inf_level = FLAGS_check_nan_inf_level;
phi::DenseTensor nan_inf_zero_tensor;
nan_inf_zero_tensor.Resize({static_cast<int64_t>(3)});
int64_t* nan_inf_zero =
dev_ctx->template Alloc<int64_t>(&nan_inf_zero_tensor);
FindGlobalMaxMinAndPrint<T, MT>
<<<1, 1, 0, dev_ctx->stream()>>>(block_num_nan_ptr,
block_num_inf_ptr,
block_num_zero_ptr,
tensor_block_max_ptr,
tensor_block_min_ptr,
tensor_block_mean_ptr,
gpu_str_ptr,
tensor.numel(),
numel_max_min,
check_nan_inf_level,
nan_inf_zero_tensor.data<int64_t>());
if (check_nan_inf_level == 0 && GetNanInfStackLimit() > 0) {
auto nan_cpu =
phi::memory_utils::Alloc(phi::CPUPlace(), sizeof(int64_t) * 3);
int64_t* nan_cpu_ptr = reinterpret_cast<int64_t*>(nan_cpu->ptr());
phi::memory_utils::Copy(phi::CPUPlace(),
nan_cpu_ptr,
place,
nan_inf_zero,
3 * sizeof(int64_t),
dev_ctx->stream());
dev_ctx->Wait();
if (nan_cpu_ptr[0] > 0 || nan_cpu_ptr[1] > 0) {
const std::string debug_info =
GetHintString<T>(op_type, var_name, place, dev_id);
PADDLE_THROW(platform::errors::PreconditionNotMet(
"There are NAN or INF (num_nan=%lld, num_inf=%lld, num_zero=%lld) in "
"%s.",
static_cast<long long>(nan_cpu_ptr[0]), // NOLINT
static_cast<long long>(nan_cpu_ptr[1]), // NOLINT
static_cast<long long>(nan_cpu_ptr[2]), // NOLINT
debug_info));
}
}
#endif
}
template <>
void tensor_check<phi::GPUContext>(const std::string& op_type,
const std::string& var_name,
const phi::DenseTensor& tensor,
const platform::Place& place) {
std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap);
TensorCheckerVisitor<phi::GPUContext> vistor(
op_type, var_name, tensor, place);
VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor);
}
} // namespace details
} // namespace framework
} // namespace paddle
|
464e69a4e2c7a09327f32cb1d4c9426dd248c281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include "header.h"
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
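// gridOffset appears to shift each thread's (i, j, k) into this device's
// sub-domain, so that several GPUs can each update their own slice of u.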
__global__ void add_kernel(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*rhs)/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
int m;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
u[k][m][j][i] = u[k][m][j][i] + rhs[k][m][j][i];
}
}
}
}
}
void add()
{
if (timeron) timer_start(t_add);
hipLaunchKernelGGL(( add_kernel) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
gridOffset, nx2, ny2, nz2, dev_u[device], dev_rhs[device]
);
if (timeron) timer_stop(t_add);
}
| 464e69a4e2c7a09327f32cb1d4c9426dd248c281.cu | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include "header.h"
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
__global__ void add_kernel(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*rhs)/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
int m;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
u[k][m][j][i] = u[k][m][j][i] + rhs[k][m][j][i];
}
}
}
}
}
void add()
{
if (timeron) timer_start(t_add);
add_kernel <<< gridDim_, blockDim_ >>> (
gridOffset, nx2, ny2, nz2, dev_u[device], dev_rhs[device]
);
if (timeron) timer_stop(t_add);
}
|
fee7c5590934caec4ad108ad53203397f3e40b23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/fluid/operators/norm_utils.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/kernels/gpu/batch_norm_utils.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = paddle::platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
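// Per-channel parameter gradients: dscale[c] = sum(dy * (x - mean[c])) /
// sqrt(variance[c] + epsilon) and dbias[c] = sum(dy), reduced over N*H*W with
// a hipcub/cub block reduction (each block handles one or more channels).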
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
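// Elementwise data gradient for the fixed-statistics (use_global_stats) path:
// dx = dy * scale / sqrt(variance + epsilon).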
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
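// Used by the in-place path: rebuilds the input x from the output y and the
// batch-norm parameters so that the backward kernels can read x again.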
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream,
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
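// Full batch-norm backward used when the vendor (cuDNN/MIOpen) path is not
// taken: each block reduces sum(dy) and sum(dy * (x - mu)) for its channel,
// recomputing mu and inv_std when they were not saved, and then forms
//   dx = scale * inv_std * (dy - sum(dy)/m - (x - mu) * inv_std^2 * sum(dy*(x-mu))/m)
// with m = N*H*W, while also writing dscale and dbias.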
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
  __shared__ typename BlockReduce::TempStorage variance_storage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum());
x_square_sum =
          BlockReduce(variance_storage).Reduce(x_square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
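// Data-gradient-only variant (dscale/dbias are not needed); `variance` is
// expected to hold the saved inverse standard deviation and is used directly.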
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T, typename Context>
void BatchNormGradRawKernel(const Context &ctx,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
int N, C, H, W, D;
paddle::operators::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = paddle::platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = ::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(
&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNBackward<T,
block,
DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
} else {
hipLaunchKernelGGL(( BNBackward<T,
block,
DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean_data,
saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
// This branch call CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(
bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T,
phi::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T,
phi::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradRawKernel<T, Context>(dev_ctx,
y_grad,
x,
scale,
bias,
saved_mean,
saved_variance,
reserve_space,
mean,
variance,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
fuse_with_relu,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context &ctx,
const DenseTensor &x_grad_grad,
const DenseTensor &scale_grad_grad,
const DenseTensor &bias_grad_grad,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
paddle::operators::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
&x_grad_grad,
&scale_grad_grad,
&bias_grad_grad,
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
| fee7c5590934caec4ad108ad53203397f3e40b23.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/fluid/operators/norm_utils.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/kernels/gpu/batch_norm_utils.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = paddle::platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
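// Element-wise dX = dY * scale / sqrt(variance + epsilon); used on the
// use_global_stats path, where the gradient is taken w.r.t. the running statistics.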
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
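// Reconstructs the input x from the output y for in-place batch norm:
// x = (y - bias) / scale / variance + mean.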
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
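// Fused native backward kernel: blocks iterate over channels, (re)computing the mean and
// inverse std when saved values are absent, block-reducing dscale and dbias, then writing dx.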
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
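// Native backward kernel used when only dx is required: block-reduces sum(dy) and
// sum(dy * (x - mean)) per channel, then applies the batch-norm gradient formula.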
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, cub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
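// Entry point for the batch-norm backward pass: dispatches between the cuDNN path, the fused
// native kernels, and the KeBNBackward* kernels depending on which gradients are requested
// and whether running (global) statistics are used.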
template <typename T, typename Context>
void BatchNormGradRawKernel(const Context &ctx,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
int N, C, H, W, D;
paddle::operators::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = paddle::platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef HIPCC
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(
&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
BNBackward<T,
block,
DataLayout::kNCHW><<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
} else {
BNBackward<T,
block,
DataLayout::kNHWC><<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean_data,
saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
      // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
BNBackwardData<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
BNBackwardData<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(
bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
KeBNBackwardData<T,
phi::DataLayout::kNCHW><<<grid1, block, 0, stream>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
KeBNBackwardData<T,
phi::DataLayout::kNHWC><<<grid1, block, 0, stream>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradRawKernel<T, Context>(dev_ctx,
y_grad,
x,
scale,
bias,
saved_mean,
saved_variance,
reserve_space,
mean,
variance,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
fuse_with_relu,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context &ctx,
const DenseTensor &x_grad_grad,
const DenseTensor &scale_grad_grad,
const DenseTensor &bias_grad_grad,
const DenseTensor &y_grad,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
paddle::operators::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
&x_grad_grad,
&scale_grad_grad,
&bias_grad_grad,
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
|
91f6c8255db96d26d3f9cd11981520a68352a9f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
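// Each thread writes its own index into data[threadIdx.x].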
__global__ void doGPUWork(int numData, int *data) {
if (threadIdx.x < numData) {
data[threadIdx.x] = threadIdx.x;
}
}
void sayHello(int *numDevices) {
int numData = 2;
int data[numData];
  int *dev_data;
  int i;
  hipGetDeviceCount(numDevices);
  hipMalloc((void**)&dev_data, numData * sizeof(int));
  hipLaunchKernelGGL(( doGPUWork), dim3(1), dim3(numData), 0, 0, numData, dev_data);
  hipMemcpy(data, dev_data, numData * sizeof(int), hipMemcpyDeviceToHost);
  hipFree(dev_data);
  // Fixed: dev_data is now a device pointer and the copy size is in bytes, so this prints 0 and 1
for (i = 0; i < numData; i++) {
printf("%d\n", data[i]);
}
}
| 91f6c8255db96d26d3f9cd11981520a68352a9f1.cu | #include <stdio.h>
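// Each thread writes its own index into data[threadIdx.x].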
__global__ void doGPUWork(int numData, int *data) {
if (threadIdx.x < numData) {
data[threadIdx.x] = threadIdx.x;
}
}
void sayHello(int *numDevices) {
int numData = 2;
int data[numData];
  int *dev_data;
  int i;
  cudaGetDeviceCount(numDevices);
  cudaMalloc((void**)&dev_data, numData * sizeof(int));
  doGPUWork<<<1, numData>>>(numData, dev_data);
  cudaMemcpy(data, dev_data, numData * sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(dev_data);
  // Fixed: dev_data is now a device pointer and the copy size is in bytes, so this prints 0 and 1
for (i = 0; i < numData; i++) {
printf("%d\n", data[i]);
}
}
|
bdde829ea5bc517baaabd6e2a9ddb91916b107e2.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2018 Ole-Christoffer Granmo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code implements the Tsetlin Machine from paper arXiv:1804.01508
https://arxiv.org/abs/1804.01508
*/
#include "TsetlinMachineConfig.cuh"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__device__ int inline action(int state)
{
if (state <= NUMBER_OF_STATES)
return 0;
else
return 1;
}
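// Type I feedback (clause_feedback == 1): stochastically rewards/penalises the two Tsetlin
// automata per feature with probabilities 1/s and (s-1)/s so that clauses learn to include
// literals matching frequent patterns (see the paper cited in the header).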
__global__ void type_i_feedback(hiprandState_t *state, int *ta_state, int *clause_feedback, int *clause_output, int *Xi, float s)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
hiprandState_t localState = state[index];
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
int output = clause_output[clause];
if (clause_feedback[clause] != 1) {
continue;
}
if (output == 0) {
if (hiprand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
if (hiprand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
} else if (output == 1) {
if (Xi[feature] == 1) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || hiprand_uniform(&localState) <= (s-1)/s) {
if (ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
}
if (hiprand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
} else if (Xi[feature] == 0) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || hiprand_uniform(&localState) <= (s-1)/s){
if (ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
if (hiprand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
}
}
}
state[index] = localState;
}
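// Type II feedback (clause_feedback == -1, clause output 1): increments the include state of
// currently excluded literals whose inclusion would make the clause output 0, combating false positives.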
__global__ void type_ii_feedback(int *ta_state, int *clause_feedback, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include;
int action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
if (clause_feedback[clause] != -1 || clause_output[clause] == 0) {
continue;
}
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (Xi[feature] == 0) {
if (action_include == 0 && ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
} else if (Xi[feature] == 1) {
if (action_include_negated == 0 && ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
}
}
/* Sum up the votes for each class (this is the multiclass version of the Tsetlin Machine) */
__global__ void sum_up_class_votes(int *clause_output, int *sum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_sum = 0;
for (int j = index; j < CLAUSES; j += stride) {
int sign = 1 - 2 * (j & 1);
local_sum += sign * clause_output[j];
}
atomicAdd(sum, local_sum);
}
/* Generate feedback for each clause (+1: Type I, -1: Type II, 0: none) based on the class sum, the threshold and the training target */
__global__ void generate_clause_feedback(hiprandState_t *state, int *clause_feedback, int *class_sum, int target)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
hiprandState_t localState = state[index];
for (int j = index; j < CLAUSES; j += stride) {
int sign = 1 - 2 * (j & 1);
if (target) {
if (hiprand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD - *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = sign;
}
} else {
if (hiprand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD + *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = -1*sign;
}
}
}
state[index] = localState;
}
__global__ void initialize_clause_output(int *clause_output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Initialize clause output
for (int j = index; j < CLAUSES; j += stride) {
clause_output[j] = 1;
}
}
__global__ void calculate_clause_output(int *ta_state, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void initialize_clause_output_predict(int *clause_output, int *all_exclude)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Initialize clause output
for (int j = index; j < CLAUSES; j += stride) {
clause_output[j] = 1;
all_exclude[j] = 1;
}
}
__global__ void calculate_clause_output_predict(int *ta_state, int *clause_output, int *all_exclude, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (action_include == 1 || action_include_negated == 1) {
all_exclude[clause] = 0;
}
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void update_with_all_exclude(int *clause_output, int *all_exclude)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
  // Clauses that exclude every literal are forced to output 0 at prediction time
for (int j = index; j < CLAUSES; j += stride) {
if (all_exclude[j] == 1) {
clause_output[j] = 0;
}
}
}
| bdde829ea5bc517baaabd6e2a9ddb91916b107e2.cu | /*
Copyright (c) 2018 Ole-Christoffer Granmo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code implements the Tsetlin Machine from paper arXiv:1804.01508
https://arxiv.org/abs/1804.01508
*/
#include "TsetlinMachineConfig.cuh"
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
__device__ int inline action(int state)
{
if (state <= NUMBER_OF_STATES)
return 0;
else
return 1;
}
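// Type I feedback (clause_feedback == 1): stochastically rewards/penalises the two Tsetlin
// automata per feature with probabilities 1/s and (s-1)/s so that clauses learn to include
// literals matching frequent patterns (see the paper cited in the header).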
__global__ void type_i_feedback(curandState *state, int *ta_state, int *clause_feedback, int *clause_output, int *Xi, float s)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
curandState localState = state[index];
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
int output = clause_output[clause];
if (clause_feedback[clause] != 1) {
continue;
}
if (output == 0) {
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
} else if (output == 1) {
if (Xi[feature] == 1) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || curand_uniform(&localState) <= (s-1)/s) {
if (ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
} else if (Xi[feature] == 0) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || curand_uniform(&localState) <= (s-1)/s){
if (ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
}
}
}
state[index] = localState;
}
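// Type II feedback (clause_feedback == -1, clause output 1): increments the include state of
// currently excluded literals whose inclusion would make the clause output 0, combating false positives.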
__global__ void type_ii_feedback(int *ta_state, int *clause_feedback, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include;
int action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
if (clause_feedback[clause] != -1 || clause_output[clause] == 0) {
continue;
}
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (Xi[feature] == 0) {
if (action_include == 0 && ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
} else if (Xi[feature] == 1) {
if (action_include_negated == 0 && ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
}
}
/* Sum up the votes for each class (this is the multiclass version of the Tsetlin Machine) */
__global__ void sum_up_class_votes(int *clause_output, int *sum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_sum = 0;
for (int j = index; j < CLAUSES; j += stride) {
int sign = 1 - 2 * (j & 1);
local_sum += sign * clause_output[j];
}
atomicAdd(sum, local_sum);
}
/* Generate feedback for each clause (+1: Type I, -1: Type II, 0: none) based on the class sum, the threshold and the training target */
__global__ void generate_clause_feedback(curandState *state, int *clause_feedback, int *class_sum, int target)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
curandState localState = state[index];
for (int j = index; j < CLAUSES; j += stride) {
int sign = 1 - 2 * (j & 1);
if (target) {
if (curand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD - *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = sign;
}
} else {
if (curand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD + *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = -1*sign;
}
}
}
state[index] = localState;
}
__global__ void initialize_clause_output(int *clause_output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Initialize clause output
for (int j = index; j < CLAUSES; j += stride) {
clause_output[j] = 1;
}
}
__global__ void calculate_clause_output(int *ta_state, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void initialize_clause_output_predict(int *clause_output, int *all_exclude)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Initialize clause output
for (int j = index; j < CLAUSES; j += stride) {
clause_output[j] = 1;
all_exclude[j] = 1;
}
}
__global__ void calculate_clause_output_predict(int *ta_state, int *clause_output, int *all_exclude, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (action_include == 1 || action_include_negated == 1) {
all_exclude[clause] = 0;
}
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void update_with_all_exclude(int *clause_output, int *all_exclude)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
  // Clauses that exclude every literal are forced to output 0 at prediction time
for (int j = index; j < CLAUSES; j += stride) {
if (all_exclude[j] == 1) {
clause_output[j] = 0;
}
}
}
|
be1233e82f15235038e64879d74ce56c4aff672e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author's note:
// This file was distributed as part of the Nature Biotechnology
// supplementary software release for DeepBind. Users of DeepBind
// are encouraged to instead use the latest source code and binaries
// for scoring sequences at
// http://tools.genes.toronto.edu/deepbind/
//
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/instruction_db.h>
SM_NAMESPACE_BEGIN
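// Training-time dropout forward pass: each element draws a uniform sample, is kept when the
// sample is >= rate, and the keep/drop mask M is stored for the backward pass (no rescaling here).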
template <typename T>
__global__ void kernel_dropout_fp_tr(hiprandState_t* state, const T* X, T* Z, bool* M, T rate, usize_t n)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
hiprandState_t local_state = state[tid];
for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) {
bool mask = (hiprand_uniform(&local_state) >= rate);
M[i] = mask;
Z[i] = X[i]*(T)mask;
}
state[tid] = local_state;
}
template <typename T>
__global__ void kernel_dropout_bp_tr(const T* dZ, const bool* M, T* dX, usize_t n)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) {
dX[i] = dZ[i]*(T)M[i];
}
}
void launch_dropout_fp_tr(hipStream_t stream, dtype_t dtype,
const void* X, double rate, void* Z, bool* M,
usize_t n)
{
launchcfg cfg = make_elemwise_launchcfg(n);
if (dtype == f32)
hipLaunchKernelGGL(( kernel_dropout_fp_tr), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, thread_cudactx().curand_state(),(const float*)X,(float*)Z,M,(float)rate,n);
else
hipLaunchKernelGGL(( kernel_dropout_fp_tr), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, thread_cudactx().curand_state(),(const double*)X,(double*)Z,M,(double)rate,n);
}
void launch_dropout_bp_tr(hipStream_t stream, dtype_t dtype,
const void* dZ, const bool* M, void* dX, usize_t n)
{
launchcfg cfg = make_elemwise_launchcfg(n);
if (dtype == f32)
hipLaunchKernelGGL(( kernel_dropout_bp_tr), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, (const float*)dZ,M,(float*)dX,n);
else
hipLaunchKernelGGL(( kernel_dropout_bp_tr), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, (const double*)dZ,M,(double*)dX,n);
}
void validate_dropout_fp_tr(opcode_t opcode, const argument& X, const argument& rate,
const argument& Z, const argument& M)
{
double _rate = rate.get<double>();
SM_ASSERT(_rate >= 0.0 && _rate <= 1.0);
SM_ASSERT(X.dtype == Z.dtype);
SM_ASSERT(X.shape == Z.shape);
SM_ASSERT(X.shape == M.shape);
}
void execute_dropout_fp_tr(opcode_t opcode, const argument& X, const argument& rate,
const argument& Z, const argument& M)
{
launch_dropout_fp_tr(thread_cudactx().stream(), X.dtype,
X.get<const void*>(), rate.get<double>(), Z.get<void*>(), M.get<bool*>(),
X.size());
}
void validate_dropout_bp_tr(opcode_t opcode, const argument& dZ, const argument& M, const argument& dX)
{
SM_ASSERT(dX.dtype == dZ.dtype);
SM_ASSERT(dX.size() == M.size());
SM_ASSERT(dX.size() == dZ.size());
}
void execute_dropout_bp_tr(opcode_t opcode, const argument& dZ, const argument& M, const argument& dX)
{
launch_dropout_bp_tr(thread_cudactx().stream(),dZ.dtype, dZ.get<const void*>(), M.get<const bool*>(), dX.get<void*>(), dX.size());
}
SM_NAMESPACE_END
| be1233e82f15235038e64879d74ce56c4aff672e.cu | // Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author's note:
// This file was distributed as part of the Nature Biotechnology
// supplementary software release for DeepBind. Users of DeepBind
// are encouraged to instead use the latest source code and binaries
// for scoring sequences at
// http://tools.genes.toronto.edu/deepbind/
//
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/instruction_db.h>
SM_NAMESPACE_BEGIN
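// Training-time dropout forward pass: each element draws a uniform sample, is kept when the
// sample is >= rate, and the keep/drop mask M is stored for the backward pass (no rescaling here).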
template <typename T>
__global__ void kernel_dropout_fp_tr(curandState_t* state, const T* X, T* Z, bool* M, T rate, usize_t n)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
curandState local_state = state[tid];
for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) {
bool mask = (curand_uniform(&local_state) >= rate);
M[i] = mask;
Z[i] = X[i]*(T)mask;
}
state[tid] = local_state;
}
template <typename T>
__global__ void kernel_dropout_bp_tr(const T* dZ, const bool* M, T* dX, usize_t n)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) {
dX[i] = dZ[i]*(T)M[i];
}
}
void launch_dropout_fp_tr(cudaStream_t stream, dtype_t dtype,
const void* X, double rate, void* Z, bool* M,
usize_t n)
{
launchcfg cfg = make_elemwise_launchcfg(n);
if (dtype == f32)
kernel_dropout_fp_tr<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(thread_cudactx().curand_state(),(const float*)X,(float*)Z,M,(float)rate,n);
else
kernel_dropout_fp_tr<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(thread_cudactx().curand_state(),(const double*)X,(double*)Z,M,(double)rate,n);
}
void launch_dropout_bp_tr(cudaStream_t stream, dtype_t dtype,
const void* dZ, const bool* M, void* dX, usize_t n)
{
launchcfg cfg = make_elemwise_launchcfg(n);
if (dtype == f32)
kernel_dropout_bp_tr<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>((const float*)dZ,M,(float*)dX,n);
else
kernel_dropout_bp_tr<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>((const double*)dZ,M,(double*)dX,n);
}
void validate_dropout_fp_tr(opcode_t opcode, const argument& X, const argument& rate,
const argument& Z, const argument& M)
{
double _rate = rate.get<double>();
SM_ASSERT(_rate >= 0.0 && _rate <= 1.0);
SM_ASSERT(X.dtype == Z.dtype);
SM_ASSERT(X.shape == Z.shape);
SM_ASSERT(X.shape == M.shape);
}
void execute_dropout_fp_tr(opcode_t opcode, const argument& X, const argument& rate,
const argument& Z, const argument& M)
{
launch_dropout_fp_tr(thread_cudactx().stream(), X.dtype,
X.get<const void*>(), rate.get<double>(), Z.get<void*>(), M.get<bool*>(),
X.size());
}
void validate_dropout_bp_tr(opcode_t opcode, const argument& dZ, const argument& M, const argument& dX)
{
SM_ASSERT(dX.dtype == dZ.dtype);
SM_ASSERT(dX.size() == M.size());
SM_ASSERT(dX.size() == dZ.size());
}
void execute_dropout_bp_tr(opcode_t opcode, const argument& dZ, const argument& M, const argument& dX)
{
launch_dropout_bp_tr(thread_cudactx().stream(),dZ.dtype, dZ.get<const void*>(), M.get<const bool*>(), dX.get<void*>(), dX.size());
}
SM_NAMESPACE_END
|
fd88ed64783b03363581c7b886946fa155c916c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define M 32
#define KNRM "\x1B[0m"
#define KRED "\x1B[31m"
#define KGRN "\x1B[32m"
#define KYEL "\x1B[33m"
#define KBLU "\x1B[34m"
#define KMAG "\x1B[35m"
#define KCYN "\x1B[36m"
#define KWHT "\x1B[37m"
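// One thread per cell. Brian's-Brain-style rules: on(1) -> dying(-1) -> off(0), and an off
// cell with exactly two 'on' neighbors switches on; border cells stay 0.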
__global__ void uni_func(int *A,int width,int *OUT)
{
	__shared__ int ns[32*32];//neighbors' state
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	bool first_row,last_row,first_col,last_col;
	first_row = col>0 && col < width-1;//first row = 0
	last_row = (col>(width*width)-width) && (col<(width*width)-1) ;//last row = 0
	first_col = col%width == 0;//first column = 0
	last_col = col%width == width -1 ;
	if (!(first_row || last_row)){// not part of the "horizontal" border
		//and the elements at last_row - last_col must also fetch their upper and lower neighbors
		ns[col] = A[col];
		ns[col-width] = A[col-width];
		ns[col+width] = A[col+width];
		__syncthreads();//each element fetches 3 cells (2 neighbors and itself) and then waits....
		if (!(last_col || first_col)){//not part of the "vertical" border
			//only the elements that are not on the border need to check their neighbors...
int n[8];
n[0] = ns[(col-1-width)] ;
n[1] = ns[(col-width)] ;
n[2] = ns[(col+1-width)] ;
			// row=2;
			//middle row of the 3x3 neighborhood
			n[3] = ns[(col-1)] ;//west neighbor
			// = ns[row*width + col] ; // center cell
			int iam = ns[col] ; // center cell
			n[4] = ns[(col+1)] ;//east neighbor
			// row =3 ;
			n[5] = ns[(col-1+width)] ;
			n[6] = ns[(col+width)] ;
			n[7] = ns[(col+1+width)] ;
			//on || off || dying
			//We start defining the state-change conditions:
			int counter_alive=0;
			int counter_dead=0; // our 3 counters that must be returned
			int counter_DYING=0; // to the CPU and printed
			// rules: -1: dying && 0:off && 1:on
			// alive, dead, DYING
			// check whether the bug in the code is here
for (int i = 0; i <= 7; i++)
{
if (n[i] != -1)//for sure is not dying - actually is not -1(negative number)
{
counter_alive += n[i];//counter_alive = counter_alive + 0/1
}
else//
{
counter_DYING -= n[i] ;//-0 || -(-1)=+1
}
}
			counter_dead = 8 - ( counter_alive + counter_DYING);//all neighbors - not_dying
if(iam == -1)//i am dying
{
iam = 0;//i am off
}
else if(iam == 1)//i am on
{
iam = -1; //i am dying
}
			else if(iam == 0 && counter_alive == 2 )//i am off and 2 neighbors on
{
iam = 1; //i will be on
}
OUT[col] = iam;
}
		else{//this is the vertical border
OUT[ col] = 0;
}
}
	else{//this is the horizontal border
OUT[ col] = 0;
}
}
int main() {
//initialize A
int i,j;
int on=0;
int off=0;
int dying=0;
//int M = 32;
int N=M*M;//all elements of A
int A[M][M] ;
int OUT[M][M] ;
srand (time(NULL));
printf("\n....IN MAIN...\n");
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (i==0 || i==M-1 || j==M-1 || j==0){
				A[i][j] = 0;//the border of the matrix
OUT[i][j] = 0;
}
else{
A[i][j]= rand()%3 -1;
//if (A[i][j] == -1){printf("%d ", A[i][j]);}
//else{printf(" %d ", A[i][j]);}
OUT[i][j] = -9;
}
}
//printf("\n");
}
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (A[i][j] == -1){printf("%d ", A[i][j]);}
else{printf(" %d ", A[i][j]);}
}
printf("\n");
}
//launching kernel
int *A_device;
//int A_size = N*sizeof(int) ;
const size_t A_size = sizeof(int) * size_t(N);
hipMalloc((void **)&A_device, A_size);
int *OUT_device;
//int A_size = N*sizeof(int) ;
const size_t OUT_size = sizeof(int) * size_t(N);
hipMalloc((void **)&OUT_device, OUT_size);
hipMemcpy(A_device, A, A_size, hipMemcpyHostToDevice);
hipMemcpy(OUT_device, OUT, OUT_size, hipMemcpyHostToDevice);
//the game is on Mrs. Hatson :)
int turn = 0;
while (1){
		if (turn % 2 == 0){//even turn: A is the input, OUT is the output
hipLaunchKernelGGL(( uni_func), dim3(M),dim3(M), 0, 0, A_device,M,OUT_device);
			hipMemcpy(OUT, OUT_device, A_size, hipMemcpyDeviceToHost);//that works
printf("\n\n-------------\n\n%d Time\n\n\n\n",turn);
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (OUT[i][j] == -1){printf("%s%d ",KRED, OUT[i][j]);}
else if (OUT[i][j] == 1){printf(" %s%d ",KGRN, OUT[i][j]);}
else{printf(" %s%d ",KNRM, OUT[i][j]);}
//make counter
if (OUT[i][j] == -1){ dying++;}
else if (OUT[i][j] == 1) {on++;}
else {off++;}
}
printf("\n");
}
}
else{
hipLaunchKernelGGL(( uni_func), dim3(M),dim3(M), 0, 0, OUT_device,M,A_device);
hipMemcpy(A, A_device, A_size, hipMemcpyDeviceToHost);
printf("\n\n-------------\n\n%d Time\n\n\n\n",turn);
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (A[i][j] == -1){printf("%s%d ",KRED, A[i][j]);}
else if (A[i][j]==1){printf(" %s%d ",KGRN, A[i][j]);}
else {printf(" %s%d ",KNRM, A[i][j]);}
//make counter
if (A[i][j] == -1){ dying++;}
else if (A[i][j] == 1) {on++;}
else {off++;}
}
printf("\n");
}
}
//print counter
printf("\n%s----------------------------------------------------\n",KNRM);
printf("counter_alive: %d, counter_dying: %d, counter_dead: %d\n",on,dying,off);
printf("--------------------------------------------------------\n");
//counters = 0
if (off == N){break;}//all elements are off (N=M*M)
on = 0;
off = 0;
dying = 0;
		turn++;//advance the turn so that uni_func is called with input and output swapped
}
return 0;
}
| fd88ed64783b03363581c7b886946fa155c916c3.cu | #include <stdio.h>
#include <stdlib.h>
#define M 32
#define KNRM "\x1B[0m"
#define KRED "\x1B[31m"
#define KGRN "\x1B[32m"
#define KYEL "\x1B[33m"
#define KBLU "\x1B[34m"
#define KMAG "\x1B[35m"
#define KCYN "\x1B[36m"
#define KWHT "\x1B[37m"
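// Brian's Brain-style cellular automaton step: every cell is on (1), dying (-1) or off (0).
// An off cell with exactly two "on" neighbours switches on, an on cell starts dying,
// and a dying cell switches off; the outermost frame of the grid is kept at 0.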
__global__ void uni_func(int *A,int width,int *OUT)
{
	__shared__ int ns[32*32];//neighbours' state
int col = blockIdx.x*blockDim.x + threadIdx.x;
bool first_row,last_row,first_col,last_col;
	first_row = col>0 && col < width-1;//first row stays 0
	last_row = (col>(width*width)-width) && (col<(width*width)-1) ;//last row stays 0
	first_col = col%width == 0;//first column stays 0
	last_col = col%width == width -1 ;
	if (!(first_row || last_row)){// not the "horizontal" border
		//and the cells in last_row - last_col also have to fetch their upper and lower neighbours
ns[col] = A[col];
ns[col-width] = A[col-width];
ns[col+width] = A[col+width];
		__syncthreads();//each cell loads 3 entries (2 neighbours and itself) and then waits....
		if (!(last_col || first_col)){//not the "vertical" border
			//only cells that are not on the border have to check their neighbours...
int n[8];
n[0] = ns[(col-1-width)] ;
n[1] = ns[(col-width)] ;
n[2] = ns[(col+1-width)] ;
// row=2;
			//middle row of the 3x3 neighbourhood
			n[3] = ns[(col-1)] ;//west neighbour
			// = ns[row*width + col] ; //central cell
			int iam = ns[col] ; //central cell
			n[4] = ns[(col+1)] ;//east neighbour
// row =3 ;
n[5] = ns[(col-1+width)] ;
n[6] = ns[(col+width)] ;
n[7] = ns[(col+1+width)] ;
//on || off || dying
			//We start defining the state-transition conditions:
			int counter_alive=0;
			int counter_dead=0; //the 3 counters that have to be returned
			int counter_DYING=0; //to the CPU and printed
			// rules: -1: dying && 0:off && 1:on
			//In the code below we count the alive, dead, DYING cells
			// check whether the bug in the code is here
for (int i = 0; i <= 7; i++)
{
if (n[i] != -1)//for sure is not dying - actually is not -1(negative number)
{
counter_alive += n[i];//counter_alive = counter_alive + 0/1
}
else//
{
counter_DYING -= n[i] ;//-0 || -(-1)=+1
}
}
			counter_dead = 8 - ( counter_alive + counter_DYING);//all neighbours - not_dying
if(iam == -1)//i am dying
{
iam = 0;//i am off
}
else if(iam == 1)//i am on
{
iam = -1; //i am dying
}
else if(iam == 0 && counter_alive == 2 )//i am off and 2 neighboors on
{
iam = 1; //i will be on
}
OUT[col] = iam;
}
		else{//this is the vertical border
OUT[ col] = 0;
}
}
	else{//this is the horizontal border
OUT[ col] = 0;
}
}
int main() {
//initialize A
int i,j;
int on=0;
int off=0;
int dying=0;
//int M = 32;
int N=M*M;//all elements of A
int A[M][M] ;
int OUT[M][M] ;
srand (time(NULL));
printf("\n....IN MAIN...\n");
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (i==0 || i==M-1 || j==M-1 || j==0){
				A[i][j] = 0;//the border of the matrix
OUT[i][j] = 0;
}
else{
A[i][j]= rand()%3 -1;
//if (A[i][j] == -1){printf("%d ", A[i][j]);}
//else{printf(" %d ", A[i][j]);}
OUT[i][j] = -9;
}
}
//printf("\n");
}
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (A[i][j] == -1){printf("%d ", A[i][j]);}
else{printf(" %d ", A[i][j]);}
}
printf("\n");
}
//launching kernel
int *A_device;
//int A_size = N*sizeof(int) ;
const size_t A_size = sizeof(int) * size_t(N);
cudaMalloc((void **)&A_device, A_size);
int *OUT_device;
//int A_size = N*sizeof(int) ;
const size_t OUT_size = sizeof(int) * size_t(N);
cudaMalloc((void **)&OUT_device, OUT_size);
cudaMemcpy(A_device, A, A_size, cudaMemcpyHostToDevice);
cudaMemcpy(OUT_device, OUT, OUT_size, cudaMemcpyHostToDevice);
//the game is on Mrs. Hatson :)
int turn = 0;
while (1){
		if (turn % 2 == 0){//even turn: A is the input, OUT is the output
uni_func<<<M,M>>>(A_device,M,OUT_device);
			cudaMemcpy(OUT, OUT_device, A_size, cudaMemcpyDeviceToHost);//that works
printf("\n\n-------------\n\n%d Time\n\n\n\n",turn);
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (OUT[i][j] == -1){printf("%s%d ",KRED, OUT[i][j]);}
else if (OUT[i][j] == 1){printf(" %s%d ",KGRN, OUT[i][j]);}
else{printf(" %s%d ",KNRM, OUT[i][j]);}
//make counter
if (OUT[i][j] == -1){ dying++;}
else if (OUT[i][j] == 1) {on++;}
else {off++;}
}
printf("\n");
}
}
else{
uni_func<<<M,M>>>(OUT_device,M,A_device);
cudaMemcpy(A, A_device, A_size, cudaMemcpyDeviceToHost);
printf("\n\n-------------\n\n%d Time\n\n\n\n",turn);
for(i=0;i< M;i++)
{
for(j=0;j< M;j++)
{
if (A[i][j] == -1){printf("%s%d ",KRED, A[i][j]);}
else if (A[i][j]==1){printf(" %s%d ",KGRN, A[i][j]);}
else {printf(" %s%d ",KNRM, A[i][j]);}
//make counter
if (A[i][j] == -1){ dying++;}
else if (A[i][j] == 1) {on++;}
else {off++;}
}
printf("\n");
}
}
//print counter
printf("\n%s----------------------------------------------------\n",KNRM);
printf("counter_alive: %d, counter_dying: %d, counter_dead: %d\n",on,dying,off);
printf("--------------------------------------------------------\n");
//counters = 0
if (off == N){break;}//all elements are off (N=M*M)
on = 0;
off = 0;
dying = 0;
		turn++;//advance the turn so that uni_func is called with input and output swapped
}
return 0;
}
|
ca613a5673b1dc5a0699c2eb766d19bd805c8021.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
<% unless type_name == 'robject' %>
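// Generated element-wise comparison kernel: for every strided pair of input elements it applies
// m_<%=name%> and stores the boolean result as a single bit of the output bit array a3.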
__global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
dtype x = *(dtype*)(p1+(i*s1));
dtype y = *(dtype*)(p2+(i*s2));
BIT_DIGIT b = (m_<%=name%>(x,y)) ? 1:0;
STORE_BIT(a3,p3+(i*s3),b);
}
}
void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
size_t gridDim = get_gridDim(n);
    size_t blockDim = get_blockDim(n);
    hipLaunchKernelGGL((<%="cumo_#{c_iter}_stride_kernel"%>), dim3(gridDim), dim3(blockDim), 0, 0, p1,p2,a3,p3,s1,s2,s3,n);
}
<% end %>
| ca613a5673b1dc5a0699c2eb766d19bd805c8021.cu | <% unless type_name == 'robject' %>
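// Generated element-wise comparison kernel: for every strided pair of input elements it applies
// m_<%=name%> and stores the boolean result as a single bit of the output bit array a3.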
__global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
dtype x = *(dtype*)(p1+(i*s1));
dtype y = *(dtype*)(p2+(i*s2));
BIT_DIGIT b = (m_<%=name%>(x,y)) ? 1:0;
STORE_BIT(a3,p3+(i*s3),b);
}
}
void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
size_t gridDim = get_gridDim(n);
size_t blockDim = get_blockDim(n);
<%="cumo_#{c_iter}_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,a3,p3,s1,s2,s3,n);
}
<% end %>
|
0834a2bdf9c21ec2da0a64d84d02e7b195281b94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
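// Element-wise derivative of the logistic sigmoid, dst[i] = exp(src[i]) / (1 + exp(src[i]))^2,
// evaluated with device intrinsics in a grid-stride loop.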
__global__ void cu_dsigmoid(const float* src, float* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __expf(src[tid]);
float tmp2 = __fadd_rd(tmp, 1.0);
tmp2 = __fmul_rd(tmp2, tmp2);
dst[tid] = fdividef(tmp, tmp2);
tid += stride;
}
} | 0834a2bdf9c21ec2da0a64d84d02e7b195281b94.cu | #include "includes.h"
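// Element-wise derivative of the logistic sigmoid, dst[i] = exp(src[i]) / (1 + exp(src[i]))^2,
// evaluated with device intrinsics in a grid-stride loop.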
__global__ void cu_dsigmoid(const float* src, float* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __expf(src[tid]);
float tmp2 = __fadd_rd(tmp, 1.0);
tmp2 = __fmul_rd(tmp2, tmp2);
dst[tid] = fdividef(tmp, tmp2);
tid += stride;
}
} |
4219cfc47312f970b2c5e2f4e2101d8f7d619d0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/allocators/free_list.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
using global_ptr = poggers::allocators::header;
__global__ void single_thread_malloc_and_free_tests(global_ptr * head){
const uint64_t test_size = 10;
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Starting\n");
uint64_t * array_list [test_size];
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_safe(sizeof(uint64_t)*20);
}
printf("%llu Malloc done\n\n", tid);
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
printf("Printed Node\n");
}
}
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
return;
}
__global__ void test_aligned_alloc_and_free(global_ptr * head){
const uint64_t test_size = 10;
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Starting\n");
uint64_t * array_list [test_size];
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_aligned(sizeof(uint64_t)*20, 16, 0);
}
printf("%llu Malloc done\n\n", tid);
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
printf("Printed Node\n");
}
}
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
printf("End of case 1/2\n");
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_aligned(sizeof(uint64_t), 512, -16);
}
printf("Done with large alignment alloc\n");
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
//printf("Printed Node\n");
}
}
printf("End of print\n");
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
return;
}
__global__ void multi_thread_malloc_and_free(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_threads) return;
	//grab 32 bytes at a time!
uint64_t * test = nullptr;
while (test == nullptr){
test = (uint64_t *) head->malloc_safe(32);
//printf("Stalling in malloc loop\n");
}
//test = (uint64_t *) head->malloc(8);
if (test != nullptr){
head->free_safe(test);
}
//nodes[tid] = test;
// test[0] = 512;
// printf("%llu Malloc done, written as %llu\n\n", tid, test[0]);
// head->free(test);
printf("%llu Free done\n\n", tid);
}
__global__ void print_heap_kernel(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0 ) return;
global_ptr::print_heap(head);
//printf("Allocated Nodes\n");
// for (int i=0; i< num_threads; i++){
// if (nodes[i] != nullptr){
// global_ptr * node = global_ptr::get_header_from_address(nodes[i]);
// node->printnode();
// }
// }
}
__host__ void print_heap(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
hipLaunchKernelGGL(( print_heap_kernel), dim3(1),dim3(1), 0, 0, head, num_threads, nodes);
}
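// Host-side driver: launches num_mallocs device threads that each allocate and immediately free
// a 32-byte block from the shared heap, then dumps the heap state for inspection.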
__host__ void test_mallocs(global_ptr * head, uint64_t num_mallocs){
printf("Starting test with %llu threads\n", num_mallocs);
uint64_t ** nodes;
hipMalloc((void **)&nodes, num_mallocs*sizeof(uint64_t));
hipLaunchKernelGGL(( multi_thread_malloc_and_free), dim3((num_mallocs -1)/512 + 1), dim3(512), 0, 0, head, num_mallocs, nodes);
hipDeviceSynchronize();
print_heap(head, num_mallocs, nodes);
hipDeviceSynchronize();
hipFree(nodes);
}
int main(int argc, char** argv) {
uint64_t bytes_in_use = 2000;
global_ptr * heap = global_ptr::init_heap(bytes_in_use);
hipDeviceSynchronize();
printf("Heap init Done\n");
hipDeviceSynchronize();
hipLaunchKernelGGL(( single_thread_malloc_and_free_tests), dim3(1),dim3(1), 0, 0, heap);
hipDeviceSynchronize();
printf("Starting Malloc tests\n\n\n");
test_mallocs(heap, 1);
test_mallocs(heap, 10);
test_mallocs(heap, 30);
test_mallocs(heap, 50);
test_mallocs(heap, 60);
test_mallocs(heap, 100);
test_mallocs(heap, 1000);
//print_heap(heap);
hipDeviceSynchronize();
global_ptr::free_heap(heap);
hipDeviceSynchronize();
printf("Starting alignment tests\n");
heap = global_ptr::init_heap(bytes_in_use);
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_aligned_alloc_and_free), dim3(1),dim3(1), 0, 0, heap);
hipDeviceSynchronize();
global_ptr::free_heap(heap);
}
| 4219cfc47312f970b2c5e2f4e2101d8f7d619d0d.cu | /*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/allocators/free_list.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
using global_ptr = poggers::allocators::header;
__global__ void single_thread_malloc_and_free_tests(global_ptr * head){
const uint64_t test_size = 10;
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Starting\n");
uint64_t * array_list [test_size];
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_safe(sizeof(uint64_t)*20);
}
printf("%llu Malloc done\n\n", tid);
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
printf("Printed Node\n");
}
}
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
return;
}
__global__ void test_aligned_alloc_and_free(global_ptr * head){
const uint64_t test_size = 10;
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Starting\n");
uint64_t * array_list [test_size];
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_aligned(sizeof(uint64_t)*20, 16, 0);
}
printf("%llu Malloc done\n\n", tid);
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
printf("Printed Node\n");
}
}
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
printf("End of case 1/2\n");
for (int i = 0; i < test_size; i++){
array_list[i] = (uint64_t *) head->malloc_aligned(sizeof(uint64_t), 512, -16);
}
printf("Done with large alignment alloc\n");
global_ptr::print_heap(head);
printf("And allocated nodes:\n");
for (int i=0; i< test_size; i++){
if (array_list[i] != nullptr){
global_ptr * node = global_ptr::get_header_from_address(array_list[i]);
node->printnode();
//printf("Printed Node\n");
}
}
printf("End of print\n");
for (int i = 0; i < test_size; i++){
if (array_list[i] != nullptr){
head->free_safe(array_list[i]);
}
printf("%d:\n", i);
global_ptr::print_heap(head);
}
global_ptr::print_heap(head);
return;
}
__global__ void multi_thread_malloc_and_free(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_threads) return;
	//grab 32 bytes at a time!
uint64_t * test = nullptr;
while (test == nullptr){
test = (uint64_t *) head->malloc_safe(32);
//printf("Stalling in malloc loop\n");
}
//test = (uint64_t *) head->malloc(8);
if (test != nullptr){
head->free_safe(test);
}
//nodes[tid] = test;
// test[0] = 512;
// printf("%llu Malloc done, written as %llu\n\n", tid, test[0]);
// head->free(test);
printf("%llu Free done\n\n", tid);
}
__global__ void print_heap_kernel(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0 ) return;
global_ptr::print_heap(head);
//printf("Allocated Nodes\n");
// for (int i=0; i< num_threads; i++){
// if (nodes[i] != nullptr){
// global_ptr * node = global_ptr::get_header_from_address(nodes[i]);
// node->printnode();
// }
// }
}
__host__ void print_heap(global_ptr * head, uint64_t num_threads, uint64_t ** nodes){
print_heap_kernel<<<1,1>>>(head, num_threads, nodes);
}
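// Host-side driver: launches num_mallocs device threads that each allocate and immediately free
// a 32-byte block from the shared heap, then dumps the heap state for inspection.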
__host__ void test_mallocs(global_ptr * head, uint64_t num_mallocs){
printf("Starting test with %llu threads\n", num_mallocs);
uint64_t ** nodes;
cudaMalloc((void **)&nodes, num_mallocs*sizeof(uint64_t));
multi_thread_malloc_and_free<<<(num_mallocs -1)/512 + 1, 512>>>(head, num_mallocs, nodes);
cudaDeviceSynchronize();
print_heap(head, num_mallocs, nodes);
cudaDeviceSynchronize();
cudaFree(nodes);
}
int main(int argc, char** argv) {
uint64_t bytes_in_use = 2000;
global_ptr * heap = global_ptr::init_heap(bytes_in_use);
cudaDeviceSynchronize();
printf("Heap init Done\n");
cudaDeviceSynchronize();
single_thread_malloc_and_free_tests<<<1,1>>>(heap);
cudaDeviceSynchronize();
printf("Starting Malloc tests\n\n\n");
test_mallocs(heap, 1);
test_mallocs(heap, 10);
test_mallocs(heap, 30);
test_mallocs(heap, 50);
test_mallocs(heap, 60);
test_mallocs(heap, 100);
test_mallocs(heap, 1000);
//print_heap(heap);
cudaDeviceSynchronize();
global_ptr::free_heap(heap);
cudaDeviceSynchronize();
printf("Starting alignment tests\n");
heap = global_ptr::init_heap(bytes_in_use);
cudaDeviceSynchronize();
test_aligned_alloc_and_free<<<1,1>>>(heap);
cudaDeviceSynchronize();
global_ptr::free_heap(heap);
}
|
ded83b2b4b145d15712da702b53ec09524cd4849.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include "Device/DataMovement/Tile.cuh"
#include "Device/Util/Timer.cuh"
#include "Device/Util/SafeCudaAPI.cuh"
#include "Device/Util/PTX.cuh"
#include "Device/Util/SafeCudaAPISync.cuh"
#include "rocblas.h"
#define SAFE_CUBLAS_CALL(a) if(a != HIPBLAS_STATUS_SUCCESS) { \
printf ("CUBLAS initialization failed\n"); \
return EXIT_FAILURE; \
}
using namespace timer;
const unsigned BLOCK_SIZE = 256;
const unsigned UNROLL = 1;
using VType = int4;
template<typename T>
__global__
void copyKernel(const T* __restrict__ d_in, int size, T* d_out) {
using LoadTileT = LoadTile <BLOCK_SIZE, T, VType, UNROLL>;
using StoreTileT = StoreTile<BLOCK_SIZE, T, VType, UNROLL>;
//using LoadTileT = IlLoadTile <BLOCK_SIZE, T, int4, UNROLL>;
//using StoreTileT = StoreTile<BLOCK_SIZE, T, int4, UNROLL * 2>;
LoadTileT load_tile(d_in, size);
StoreTileT store_tile(d_out, size);
while (load_tile.is_valid()) {
T array[LoadTileT::THREAD_ITEMS];
load_tile.load(array);
store_tile.store(array);
}
int id = blockIdx.x * BLOCK_SIZE + threadIdx.x;
for (int i = load_tile.last_index() + id; i < size; i += load_tile.stride())
d_out[i] = d_in[i];
}
template<typename T>
__global__
void workStructKernel(const T* __restrict__ d_in, int size, T* d_out) {
xlib::WordArray<int, 10> test(d_out);
d_out[0] = test[size];
}
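// Simple grid-stride copy kernels: each launch streams one, two, or three independent arrays
// from input to output (cf. the tiled copyKernel above).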
template<typename T>
__global__
void copyTest1(const T* __restrict__ d_in, int size, T* __restrict__ d_out) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride)
d_out[i] = d_in[i];
}
template<typename T, typename R>
__global__
void copyTest2(const T* __restrict__ d_in1,
const R* __restrict__ d_in2,
int size,
T* __restrict__ d_out1,
R* __restrict__ d_out2) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride) {
d_out1[i] = d_in1[i];
d_out2[i] = d_in2[i];
}
}
template<typename T, typename R>
__global__
void copyTest3(const T* __restrict__ d_in1,
const T* __restrict__ d_in2,
const R* __restrict__ d_in3,
int size,
T* __restrict__ d_out1,
T* __restrict__ d_out2,
R* __restrict__ d_out3) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride) {
d_out1[i] = d_in1[i];
d_out2[i] = d_in2[i];
d_out3[i] = d_in3[i];
}
}
int main(int argc, char* argv[]) {
/*sTimer<DEVICE> TM;
using T = int;
const int size = (1 << 29);
auto h_array = new T[size];
std::iota(h_array, h_array + size, 0);
T* d_in, *d_out;
cuMalloc(d_in, size);
cuMalloc(d_out, size);
cuMemcpyToDevice(h_array, size, d_in);
cuMemset0x00(d_out, size);
//--------------------------------------------------------------------------
TM.start();
const int MUL = (sizeof(VType) / sizeof(int)) * UNROLL;
copyKernel
<<< xlib::ceil_div<BLOCK_SIZE * MUL>(size), BLOCK_SIZE >>>
//copyKernel <<< xlib::ResidentBlocks<BLOCK_SIZE>::value, BLOCK_SIZE >>>
(d_in, size, d_out);
CHECK_CUDA_ERROR
TM.stop();
TM.print("copy");
cuMemcpyToHost(d_out, size, h_array);
for (int i = 0; i < size; i++) {
if (h_array[i] != i)
ERROR("Wrong result at: ", i, " value: ", h_array[i]);
//std::cout << "Wrong result at: " << i << " value: " << h_array[i] << std::endl;
}
std::cout << "Correct <>" << std::endl;
hipblasHandle_t handle;
SAFE_CUBLAS_CALL( hipblasCreate(&handle) )
TM.start();
SAFE_CUBLAS_CALL( hipblasScopy(handle, size,
reinterpret_cast<float*>(d_in), 1,
reinterpret_cast<float*>(d_out), 1) )
TM.stop();
TM.print("cublas");
SAFE_CUBLAS_CALL( hipblasDestroy(handle) )
workStructKernel
<<< xlib::ceil_div<BLOCK_SIZE * MUL>(size), BLOCK_SIZE >>>
//copyKernel <<< xlib::ResidentBlocks<BLOCK_SIZE>::value, BLOCK_SIZE >>>
(d_in, size, d_out);*/
#define TEST1
//__________________________________________________________________________
int size = (1 << 30);
#if defined(TEST3)
int16_t *d_in1, *d_in2, *d_out1, *d_out2;
int8_t *d_in3, *d_out3;
cuMalloc(d_in1, size);
cuMalloc(d_in2, size);
cuMalloc(d_in3, size);
cuMalloc(d_out1, size);
cuMalloc(d_out2, size);
cuMalloc(d_out3, size);
#elif defined(TEST1)
int* d_in4, *d_out4;
cuMalloc(d_in4, size);
cuMalloc(d_out4, size);
#elif defined (TEST2)
int16_t* d_in5, *d_out5;
int8_t *d_in6, *d_out6;
cuMalloc(d_in5, size);
cuMalloc(d_out5, size);
cuMalloc(d_in6, size);
cuMalloc(d_out6, size);
#endif
Timer<DEVICE> TM;
for (int i = (1 << 16); i < size; i *= 2) {
TM.start();
#if defined(TEST3)
hipLaunchKernelGGL(( copyTest3) , dim3(xlib::ceil_div<BLOCK_SIZE>(i)), dim3(BLOCK_SIZE) , 0, 0,
d_in1, d_in2, d_in3, i, d_out1, d_out2, d_out3);
#elif defined (TEST2)
hipLaunchKernelGGL(( copyTest2) , dim3(xlib::ceil_div<BLOCK_SIZE>(i)), dim3(BLOCK_SIZE) , 0, 0,
d_in5, d_in6, i, d_out5, d_out6);
#elif defined(TEST1)
hipLaunchKernelGGL(( copyTest1) , dim3(xlib::ceil_div<BLOCK_SIZE>(i)), dim3(BLOCK_SIZE) , 0, 0,
d_in4, i, d_out4);
#endif
TM.stop();
std::cout << "i:\t" << i << "\t" << TM.duration() << std::endl;
}
}
| ded83b2b4b145d15712da702b53ec09524cd4849.cu | #include <iostream>
#include <random>
#include "Device/DataMovement/Tile.cuh"
#include "Device/Util/Timer.cuh"
#include "Device/Util/SafeCudaAPI.cuh"
#include "Device/Util/PTX.cuh"
#include "Device/Util/SafeCudaAPISync.cuh"
#include "cublas_v2.h"
#define SAFE_CUBLAS_CALL(a) if(a != CUBLAS_STATUS_SUCCESS) { \
printf ("CUBLAS initialization failed\n"); \
return EXIT_FAILURE; \
}
using namespace timer;
const unsigned BLOCK_SIZE = 256;
const unsigned UNROLL = 1;
using VType = int4;
template<typename T>
__global__
void copyKernel(const T* __restrict__ d_in, int size, T* d_out) {
using LoadTileT = LoadTile <BLOCK_SIZE, T, VType, UNROLL>;
using StoreTileT = StoreTile<BLOCK_SIZE, T, VType, UNROLL>;
//using LoadTileT = IlLoadTile <BLOCK_SIZE, T, int4, UNROLL>;
//using StoreTileT = StoreTile<BLOCK_SIZE, T, int4, UNROLL * 2>;
LoadTileT load_tile(d_in, size);
StoreTileT store_tile(d_out, size);
while (load_tile.is_valid()) {
T array[LoadTileT::THREAD_ITEMS];
load_tile.load(array);
store_tile.store(array);
}
int id = blockIdx.x * BLOCK_SIZE + threadIdx.x;
for (int i = load_tile.last_index() + id; i < size; i += load_tile.stride())
d_out[i] = d_in[i];
}
template<typename T>
__global__
void workStructKernel(const T* __restrict__ d_in, int size, T* d_out) {
xlib::WordArray<int, 10> test(d_out);
d_out[0] = test[size];
}
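// Simple grid-stride copy kernels: each launch streams one, two, or three independent arrays
// from input to output (cf. the tiled copyKernel above).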
template<typename T>
__global__
void copyTest1(const T* __restrict__ d_in, int size, T* __restrict__ d_out) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride)
d_out[i] = d_in[i];
}
template<typename T, typename R>
__global__
void copyTest2(const T* __restrict__ d_in1,
const R* __restrict__ d_in2,
int size,
T* __restrict__ d_out1,
R* __restrict__ d_out2) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride) {
d_out1[i] = d_in1[i];
d_out2[i] = d_in2[i];
}
}
template<typename T, typename R>
__global__
void copyTest3(const T* __restrict__ d_in1,
const T* __restrict__ d_in2,
const R* __restrict__ d_in3,
int size,
T* __restrict__ d_out1,
T* __restrict__ d_out2,
R* __restrict__ d_out3) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride) {
d_out1[i] = d_in1[i];
d_out2[i] = d_in2[i];
d_out3[i] = d_in3[i];
}
}
int main(int argc, char* argv[]) {
/*sTimer<DEVICE> TM;
using T = int;
const int size = (1 << 29);
auto h_array = new T[size];
std::iota(h_array, h_array + size, 0);
T* d_in, *d_out;
cuMalloc(d_in, size);
cuMalloc(d_out, size);
cuMemcpyToDevice(h_array, size, d_in);
cuMemset0x00(d_out, size);
//--------------------------------------------------------------------------
TM.start();
const int MUL = (sizeof(VType) / sizeof(int)) * UNROLL;
copyKernel
<<< xlib::ceil_div<BLOCK_SIZE * MUL>(size), BLOCK_SIZE >>>
//copyKernel <<< xlib::ResidentBlocks<BLOCK_SIZE>::value, BLOCK_SIZE >>>
(d_in, size, d_out);
CHECK_CUDA_ERROR
TM.stop();
TM.print("copy");
cuMemcpyToHost(d_out, size, h_array);
for (int i = 0; i < size; i++) {
if (h_array[i] != i)
ERROR("Wrong result at: ", i, " value: ", h_array[i]);
//std::cout << "Wrong result at: " << i << " value: " << h_array[i] << std::endl;
}
std::cout << "Correct <>" << std::endl;
cublasHandle_t handle;
SAFE_CUBLAS_CALL( cublasCreate(&handle) )
TM.start();
SAFE_CUBLAS_CALL( cublasScopy(handle, size,
reinterpret_cast<float*>(d_in), 1,
reinterpret_cast<float*>(d_out), 1) )
TM.stop();
TM.print("cublas");
SAFE_CUBLAS_CALL( cublasDestroy(handle) )
workStructKernel
<<< xlib::ceil_div<BLOCK_SIZE * MUL>(size), BLOCK_SIZE >>>
//copyKernel <<< xlib::ResidentBlocks<BLOCK_SIZE>::value, BLOCK_SIZE >>>
(d_in, size, d_out);*/
#define TEST1
//__________________________________________________________________________
int size = (1 << 30);
#if defined(TEST3)
int16_t *d_in1, *d_in2, *d_out1, *d_out2;
int8_t *d_in3, *d_out3;
cuMalloc(d_in1, size);
cuMalloc(d_in2, size);
cuMalloc(d_in3, size);
cuMalloc(d_out1, size);
cuMalloc(d_out2, size);
cuMalloc(d_out3, size);
#elif defined(TEST1)
int* d_in4, *d_out4;
cuMalloc(d_in4, size);
cuMalloc(d_out4, size);
#elif defined (TEST2)
int16_t* d_in5, *d_out5;
int8_t *d_in6, *d_out6;
cuMalloc(d_in5, size);
cuMalloc(d_out5, size);
cuMalloc(d_in6, size);
cuMalloc(d_out6, size);
#endif
Timer<DEVICE> TM;
for (int i = (1 << 16); i < size; i *= 2) {
TM.start();
#if defined(TEST3)
copyTest3 <<< xlib::ceil_div<BLOCK_SIZE>(i), BLOCK_SIZE >>>
(d_in1, d_in2, d_in3, i, d_out1, d_out2, d_out3);
#elif defined (TEST2)
copyTest2 <<< xlib::ceil_div<BLOCK_SIZE>(i), BLOCK_SIZE >>>
(d_in5, d_in6, i, d_out5, d_out6);
#elif defined(TEST1)
copyTest1 <<< xlib::ceil_div<BLOCK_SIZE>(i), BLOCK_SIZE >>>
(d_in4, i, d_out4);
#endif
TM.stop();
std::cout << "i:\t" << i << "\t" << TM.duration() << std::endl;
}
}
|
e516e6e3dd7fa58cef114a68691d956e7bbce1c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[64,64] --blockDim=[16,16]
#include "common.h"
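// Verification-style test case (note the __requires preconditions): each block copies a tile of
// the input matrix into shared memory and then writes it back out to the output unchanged.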
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps)
{
__requires(width == 1024);
__requires(height == 1024);
__requires(nreps == 1);
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++)
{
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
{
tile[threadIdx.y][threadIdx.x] = idata[index];
}
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
{
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
}
| e516e6e3dd7fa58cef114a68691d956e7bbce1c9.cu | //pass
//--gridDim=[64,64] --blockDim=[16,16]
#include "common.h"
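// Verification-style test case (note the __requires preconditions): each block copies a tile of
// the input matrix into shared memory and then writes it back out to the output unchanged.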
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps)
{
__requires(width == 1024);
__requires(height == 1024);
__requires(nreps == 1);
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++)
{
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
{
tile[threadIdx.y][threadIdx.x] = idata[index];
}
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
{
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
}
|
4cf28380f62d7778e74a876eab3762463c47af6e.hip | // !!! This is a file automatically generated by hipify!!!
/*//------------------------------------------------------------------------------------------------------------
| TEST of the complete Condensation method on the Hilbert matrix, well known for stressing numerical stability,
| since as N grows its determinant becomes very small yet stays different from 0!
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
int main(void){
int n=10;
hipEvent_t T1, T2;
hipEventCreate(&T1);
hipEventCreate(&T2);
float diff_time;
matrice matA (n);
matA.Hilbert_Init();
matA.print();
matA.print_tomath("Output/file.dat");
float pivot[n];
float result;
cout<<"\n Condensation Procedura Secondo la CPU"<<endl;
hipEventRecord(T1,0);
matA.Cpu_Condensation(pivot);
hipEventRecord(T2,0);
hipEventSynchronize(T2);
hipEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Cpu_Determinant_Condensation();
cout<<"\n Determinante ="<<result<<endl;
matA.sync_DeviceToHost();
cout<<"\n Condensation Procedura Secondo la GPU "<<endl;
hipEventRecord(T1,0);
matA.Gpu_Condensation(pivot);
hipEventRecord(T2,0);
hipEventSynchronize(T2);
hipEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Gpu_Determinant_Condensation();
cout<<"\n Determinante ="<<result<<endl;
matA.sync_HostToDevice();
cout<<"\n Condensation Procedura Secondo la GPU (versione TEXTURE)"<<endl;
hipEventRecord(T1,0);
matA.Gpu_Condensation_Best(pivot);
hipEventRecord(T2,0);
hipEventSynchronize(T2);
hipEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Gpu_Determinant_Condensation_Best();
cout<<"\n Determinante ="<<result<<endl;
return 0;
}
| 4cf28380f62d7778e74a876eab3762463c47af6e.cu | /*//------------------------------------------------------------------------------------------------------------
| TEST of the complete Condensation method on the Hilbert matrix, well known for stressing numerical stability,
| since as N grows its determinant becomes very small yet stays different from 0!
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
int main(void){
int n=10;
cudaEvent_t T1, T2;
cudaEventCreate(&T1);
cudaEventCreate(&T2);
float diff_time;
matrice matA (n);
matA.Hilbert_Init();
matA.print();
matA.print_tomath("Output/file.dat");
float pivot[n];
float result;
cout<<"\n Condensation Procedura Secondo la CPU"<<endl;
cudaEventRecord(T1,0);
matA.Cpu_Condensation(pivot);
cudaEventRecord(T2,0);
cudaEventSynchronize(T2);
cudaEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Cpu_Determinant_Condensation();
cout<<"\n Determinante ="<<result<<endl;
matA.sync_DeviceToHost();
cout<<"\n Condensation Procedura Secondo la GPU "<<endl;
cudaEventRecord(T1,0);
matA.Gpu_Condensation(pivot);
cudaEventRecord(T2,0);
cudaEventSynchronize(T2);
cudaEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Gpu_Determinant_Condensation();
cout<<"\n Determinante ="<<result<<endl;
matA.sync_HostToDevice();
cout<<"\n Condensation Procedura Secondo la GPU (versione TEXTURE)"<<endl;
cudaEventRecord(T1,0);
matA.Gpu_Condensation_Best(pivot);
cudaEventRecord(T2,0);
cudaEventSynchronize(T2);
cudaEventElapsedTime(&diff_time,T1,T2);
for(int i=0; i<n;i++)cout<<pivot[i]<<endl;
cout << "tempo=" << diff_time<<"\n";
// matA.print();
result=matA.Gpu_Determinant_Condensation_Best();
cout<<"\n Determinante ="<<result<<endl;
return 0;
}
|
36c63fc2749d07fe7f1882ffcd85c3899231185c.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Simplified version of UF that doesn't make use of the Tile Merging technique.
// The initial phase which performs labeling inside tiles is avoided.
// This variation performs worse than the original one which uses Tiles Merging.
#define BLOCK_X 16
#define BLOCK_Y 16
using namespace cv;
namespace {
	// Climbs back up to the root of the tree starting from one of its nodes n
	__device__ unsigned Find(const int *s_buf, unsigned n) {
		// Warning: never call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
	// Merges the trees containing nodes a and b by linking their roots
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned img_index = y * (img.step / img.elem_size) + x;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
if (labels[labels_index]) {
if (y > 0) {
if (x > 0 && labels.data[labels_index - (labels.step / labels.elem_size) - 1]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size) - 1);
}
if (labels.data[labels_index - (labels.step / labels.elem_size)]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size));
}
if (x + 1 < labels.cols && labels.data[labels_index - (labels.step / labels.elem_size) + 1]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size) + 1);
}
}
if (x > 0 && labels.data[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_naive : public GpuLabeling2D<CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_naive() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.rows, d_img_.cols, CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_X - 1) / BLOCK_X, (d_img_.rows + BLOCK_Y - 1) / BLOCK_Y, 1);
block_size_ = dim3(BLOCK_X, BLOCK_Y, 1);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
		// Label the pixels locally within each block
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
		// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
		// Link together the union-find trees of the different blocks
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
		// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//// PathCompression << <grid_size_, block_size_ >> > (d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
		// Collapse the union-find trees onto their roots
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
// d_img_labels_.download(img_labels_);
hipDeviceSynchronize();
}
};
REGISTER_LABELING(UF_naive);
| 36c63fc2749d07fe7f1882ffcd85c3899231185c.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Simplified version of UF that doesn't make use of the Tile Merging technique.
// The initial phase which performs labeling inside tiles is avoided.
// This variation performs worse than the original one which uses Tiles Merging.
#define BLOCK_X 16
#define BLOCK_Y 16
using namespace cv;
namespace {
	// Climbs back up to the root of the tree starting from one of its nodes n
	__device__ unsigned Find(const int *s_buf, unsigned n) {
		// Warning: never call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
	// Merges the trees containing nodes a and b by linking their roots
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned img_index = y * (img.step / img.elem_size) + x;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
if (labels[labels_index]) {
if (y > 0) {
if (x > 0 && labels.data[labels_index - (labels.step / labels.elem_size) - 1]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size) - 1);
}
if (labels.data[labels_index - (labels.step / labels.elem_size)]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size));
}
if (x + 1 < labels.cols && labels.data[labels_index - (labels.step / labels.elem_size) + 1]) {
Union(labels.data, labels_index, labels_index - (labels.step / labels.elem_size) + 1);
}
}
if (x > 0 && labels.data[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSzi labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned labels_index = y * (labels.step / labels.elem_size) + x;
if (x < labels.cols && y < labels.rows) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_naive : public GpuLabeling2D<CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_naive() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.rows, d_img_.cols, CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_X - 1) / BLOCK_X, (d_img_.rows + BLOCK_Y - 1) / BLOCK_Y, 1);
block_size_ = dim3(BLOCK_X, BLOCK_Y, 1);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
		// Label the pixels locally within each block
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
		// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
		// Link together the union-find trees of the different blocks
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
		// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//// PathCompression << <grid_size_, block_size_ >> > (d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
		// Collapse the union-find trees onto their roots
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
// d_img_labels_.download(img_labels_);
cudaDeviceSynchronize();
}
};
REGISTER_LABELING(UF_naive);
|
9c20391da4deff71585a99127a5829e7ef32d49f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h> /* for clock() and CLOCKS_PER_SEC */
__global__ void VecAdd(float* A, float* B, float* C,float* D, int N)
{
int j;
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
for(j=0;j<1024;j++)
{C[i] = A[i] + B[i];
D[i]= A[i]*B[i];}
}
// Host code
int main()
{
int N = 1024*1024,i;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
float* h_D = (float*)malloc(size);
// Initialize input vectors
for(i=0; i<N; i++)
{
h_A[i]= (i*3);
h_B[i]= (i*3);
}
// Allocate vectors in device memory
float t1, t2, t3;
t1=clock();
float* d_A;
hipMalloc((void**)&d_A, size);
float* d_B;
hipMalloc((void**)&d_B, size);
float* d_C;
hipMalloc((void**)&d_C, size);
float* d_D;
hipMalloc((void**)&d_D, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
hipLaunchKernelGGL(( VecAdd), dim3(16384),dim3(64), 0, 0, d_A, d_B, d_C, d_D,N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipMemcpy(h_D, d_D, size, hipMemcpyDeviceToHost);
t2=clock();
t3=((t2-t1)/ CLOCKS_PER_SEC);
for(i=0; i<N; i++)
{
printf("\n%f",h_C[100]);
printf("\n%f",h_D[100]);
}
printf("\n time in gpu= %f",t3);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Free host memory
}
} | 9c20391da4deff71585a99127a5829e7ef32d49f.cu | #include<stdio.h>
#include<time.h> /* for clock() and CLOCKS_PER_SEC */
__global__ void VecAdd(float* A, float* B, float* C,float* D, int N)
{
int j;
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
for(j=0;j<1024;j++)
{C[i] = A[i] + B[i];
D[i]= A[i]*B[i];}
}
// Host code
int main()
{
int N = 1024*1024,i;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
float* h_D = (float*)malloc(size);
// Initialize input vectors
for(i=0; i<N; i++)
{
h_A[i]= (i*3);
h_B[i]= (i*3);
}
// Allocate vectors in device memory
float t1, t2, t3;
t1=clock();
float* d_A;
cudaMalloc((void**)&d_A, size);
float* d_B;
cudaMalloc((void**)&d_B, size);
float* d_C;
cudaMalloc((void**)&d_C, size);
float* d_D;
cudaMalloc((void**)&d_D, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
VecAdd<<< 16384,64>>>(d_A, d_B, d_C, d_D,N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost);
t2=clock();
t3=((t2-t1)/ CLOCKS_PER_SEC);
for(i=0; i<N; i++)
{
printf("\n%f",h_C[100]);
printf("\n%f",h_D[100]);
}
printf("\n time in gpu= %f",t3);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
}
|
cube.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Babak Poursartip
05/14/2020
based on Intro to parallel programming cuda
remarks:
- data on the host, starts with h (h_in)
- data on the device, starts with d (d_in)
*/
#include <stdio.h>
// kernel
// __global__: declaration specifier (dec spec)
__global__ void cube(float*d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
}
int main(){
printf(" code starts ... \n");
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int ii = 0; ii < ARRAY_SIZE; ii++){
h_in[ii] = float(ii);
}
float h_out[ARRAY_SIZE];
// declare gpu memory pointers
float *d_in;
float *d_out;
// allocate gpu memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
  // launch the kernel
  hipLaunchKernelGGL(cube, dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the results in the CPU
hipMemcpy(h_out,d_out, ARRAY_BYTES,hipMemcpyDeviceToHost);
// print out the results
for (int ii = 0; ii < ARRAY_SIZE; ++ii){
printf(" %f", h_out[ii]);
printf( ( (ii%5) !=4 ) ? "\t":"\n" );
}
// free gpu
hipFree(d_in);
hipFree(d_out);
return 0;
}
| cube.cu |
/*
Babak Poursartip
05/14/2020
based on Intro to parallel programming cuda
remarks:
- data on the host, starts with h (h_in)
- data on the device, starts with d (d_in)
*/
#include <stdio.h>
// kernel
// __global__: declaration specifier (dec spec)
__global__ void cube(float*d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
}
int main(){
printf(" code starts ... \n");
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int ii = 0; ii < ARRAY_SIZE; ii++){
h_in[ii] = float(ii);
}
float h_out[ARRAY_SIZE];
// declare gpu memory pointers
float *d_in;
float *d_out;
// allocate gpu memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the results in the CPU
cudaMemcpy(h_out,d_out, ARRAY_BYTES,cudaMemcpyDeviceToHost);
// print out the results
for (int ii = 0; ii < ARRAY_SIZE; ++ii){
printf(" %f", h_out[ii]);
printf( ( (ii%5) !=4 ) ? "\t":"\n" );
}
// free gpu
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
7aa2b59d10d2502452c897889bae376b91446665.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle
| 7aa2b59d10d2502452c897889bae376b91446665.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle
|
43864d5e313418327c0db3c9fd03d1d44735afd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DLLExport
#define TestExport
#include "../../source_shared/include/global_defines.h"
#include "../../lib_core/include/lib_core.h"
#include "gpuert.h"
namespace lib_ensembles {
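// host_kernel is a dispatch trampoline: a single templated kernel routes
// execution to the requested device-side stage, so every stage shares one
// launch path (see CallCudaKernel below).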
template <typename T>
__global__ void host_kernel(GpuErt<T> *gpu_algo,
GpuDteAlgorithmShared::GpuParams<T> params,
GpuDteAlgorithmShared::GpuDteKernelId id) {
gpu_algo->GetConstPointers(¶ms.iteration_info, ¶ms.dataset_info,
¶ms.static_info);
switch (id) {
case GpuDteAlgorithmShared::kSetupKernel:
gpu_algo->gpuert_setup_kernel(¶ms, 123512);
break;
case GpuDteAlgorithmShared::kInitTreeBatch:
gpu_algo->gpuert_initialize_tree_batch(¶ms);
break;
case GpuDteAlgorithmShared::kFindSplit:
gpu_algo->gpuert_find_split(¶ms);
break;
case GpuDteAlgorithmShared::kPerformSplit:
gpu_algo->gpuert_perform_split(¶ms);
break;
case GpuDteAlgorithmShared::kPredict:
gpu_algo->gpuert_predict(¶ms);
break;
default:
break;
}
}
template <typename T>
void GpuErt<T>::CallCudaKernel(int blocks, int block_size,
GpuDteAlgorithmShared::GpuParams<T> ¶ms,
GpuDteAlgorithmShared::GpuDteKernelId id) {
hipLaunchKernelGGL(( host_kernel<T>), dim3(blocks), dim3(block_size), 0, 0, this, params, id);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_setup_kernel(
GpuDteAlgorithmShared::GpuParams<T> *params, unsigned long long seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread gets same seed, a different sequence number, no offset
if (id < params->iteration_info->threads_launched)
hiprand_init(seed, id, 0, ¶ms->random_states[id]);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_initialize_tree_batch(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ int s_indexCursor;
if (threadIdx.x == 0) {
if (params->static_info->balanced_sampling &&
params->dataset_info->data_type == type_classification_)
s_indexCursor = 0;
else
s_indexCursor = params->dataset_info->nr_instances;
}
__syncthreads();
int treeOffset = params->dataset_info->nr_instances * blockIdx.x;
if (params->dataset_info->data_type == type_classification_ &&
params->static_info->balanced_sampling) {
// Initialize indices main buffer
int localCursor;
int randVal;
int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size;
curandStateMRG32k3a localState = params->random_states[stateId];
for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
int targetStart = params->target_starts[i];
int targetEnd = (i == params->dataset_info->nr_target_values - 1)
? params->dataset_info->nr_instances - 1
: params->target_starts[i + 1] - 1;
for (int ii = threadIdx.x;
ii < params->dataset_info->nr_instances /
params->dataset_info->nr_target_values;
ii += blockDim.x) {
localCursor = GpuDte<T>::AtomicAdd(&s_indexCursor, 1);
if (targetEnd - targetStart > 0)
randVal =
targetStart + hiprand(&localState) % (targetEnd - targetStart);
else
randVal = targetStart;
params->indices_buffer[0][treeOffset + localCursor] = randVal;
}
}
} else {
// Initialize indices main buffer
for (int i = threadIdx.x; i < params->dataset_info->nr_instances;
i += blockDim.x)
params->indices_buffer[0][treeOffset + i] = i;
}
__syncthreads();
if (threadIdx.x == 0) {
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> root;
root.parent_id = -2;
root.attribute = -2;
root.split_point = -2;
root.tracking_id = GpuDte<T>::AtomicAdd(¶ms->node_cursors[node_id_], 1);
root.node_index_start = treeOffset;
root.node_index_count = s_indexCursor;
params->node_buffers[params->iteration_info->read_buffer_id][blockIdx.x] =
root;
}
}
template <typename T>
__device__ void GpuErt<T>::gpuert_find_split(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ T s_dynamic_shared[40];
__shared__ GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> s_tree_node;
__shared__ GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> s_tmp_node;
__shared__ int s_attribute_type;
__shared__ bool s_sensible_split;
curandStateMRG32k3a localState;
localState = params->random_states[(blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size];
if (threadIdx.x == 0) {
s_tree_node =
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node =
params
->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node.tmp_score = 0;
s_sensible_split = false;
}
__syncthreads();
bool firstFeature = true;
int k = params->static_info->nr_features;
int max_retries = k - params->dataset_info->nr_attributes < -10
? -10
: k - params->dataset_info->nr_attributes;
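// Extremely-randomized-trees style search: draw up to nr_features random
// attributes, each with a randomized split point, and keep drawing (down to
// the max_retries margin) until at least one sensible split has been found.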
while ((k > max_retries) && (k-- > 0 || !s_sensible_split)) {
if (threadIdx.x == 0) {
s_tmp_node.tmp_attribute =
hiprand(&localState) % params->dataset_info->nr_attributes;
s_tmp_node.tmp_split = 0;
s_attribute_type = params->attribute_type[s_tmp_node.tmp_attribute];
s_attribute_type =
s_attribute_type >= max_nominal_ ? 2 : s_attribute_type;
}
for (int i = threadIdx.x;
i < params->dataset_info->nr_target_values * max_nominal_;
i += blockDim.x)
s_dynamic_shared[i] = 0;
__syncthreads();
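// Ten threads each sample the chosen attribute from one random instance of
// this node; the candidate split point becomes the mean of those ten samples.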
if (threadIdx.x < 10) {
T dat = get_data_point(
s_tmp_node.tmp_attribute,
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[(hiprand(&localState) %
s_tree_node.node_index_count) +
s_tree_node.node_index_start],
params->dataset_info->nr_instances, params->dataset);
GpuDte<T>::AtomicAdd(&s_tmp_node.tmp_split, dat);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tmp_node.tmp_split /= 10.0f;
}
__syncthreads();
T response;
switch (params->dataset_info->data_type) {
case type_classification_:
response = eval_numeric_attribute(params, s_tree_node, s_tmp_node,
s_dynamic_shared, s_attribute_type);
break;
case type_regression_:
response = varianceCalculation(params, s_tree_node, s_tmp_node,
s_dynamic_shared);
break;
}
if (threadIdx.x == 0) {
if (s_tmp_node.tmp_score < response || firstFeature) {
// Save splitpoint, attribute and distribution
s_tmp_node.tmp_score = response;
s_tree_node.split_point = s_tmp_node.tmp_split;
s_tree_node.attribute = s_tmp_node.tmp_attribute;
switch (params->dataset_info->data_type) {
case type_classification_:
for (int i = 0;
i < params->dataset_info->nr_target_values * max_nominal_; ++i)
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
i] = s_dynamic_shared[i];
break;
case type_regression_:
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_] = s_dynamic_shared[2];
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
1] = s_dynamic_shared[3];
break;
}
}
if (s_tmp_node.tmp_score > 1e-3) s_sensible_split = true;
firstFeature = false;
}
__syncthreads();
}
params->random_states[(blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size] = localState;
// Copy back result
if (threadIdx.x == 0) {
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset] =
s_tree_node;
params->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset] =
s_tmp_node;
// params->random_states[blockIdx.x] = localState;
}
}
template <typename T>
__device__ void GpuErt<T>::gpuert_perform_split(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ int s_node_counts[40];
gpudte_perform_split(*(params->static_info), *(params->dataset_info),
*(params->iteration_info), params->probability_buffers,
params->probability_tmp_buffer, params->dataset,
params->attribute_type, s_node_counts,
params->indices_buffer, params->node_cursors,
params->node_buffers);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_predict(
GpuDteAlgorithmShared::GpuParams<T> *params) {
if (threadIdx.x + blockIdx.x * blockDim.x >=
params->iteration_info->threads_launched)
return;
int tid = params->iteration_info->tree_offset + threadIdx.x +
blockIdx.x * blockDim.x;
gpudte_predict(
tid, params->dataset_info->nr_instances, params->dataset_info->data_type,
params->dataset_info->nr_target_values, params->node_buffers_classify,
params->dataset, params->probability_tmp_buffer, params->predictions,
params->attribute_type);
}
template <typename T>
__device__ T GpuErt<T>::eval_numeric_attribute(
GpuDteAlgorithmShared::GpuParams<T> *params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> &node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> &tmp_node, T *curr_dist,
int att_type) {
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int weight = 1;
int inst;
T val;
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
if (val != -flt_max)
GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values *
((val < tmp_node.tmp_split) ? 0 : 1) +
int(params->target_data[inst])],
weight);
else
GpuDte<T>::AtomicAdd(&curr_dist[int(params->target_data[inst])], weight);
}
__syncthreads();
T response = 0;
if (threadIdx.x == 0) {
int count;
for (int i = 0; i < 2; ++i) {
count = 0;
for (int ii = 0; ii < params->dataset_info->nr_target_values; ++ii) {
count += curr_dist[i * params->dataset_info->nr_target_values + ii];
}
if (count == 0) response = -flt_max;
}
if (response != -flt_max) {
T prior = entropy_over_columns(curr_dist, att_type,
params->dataset_info->nr_target_values);
T posterior = entropy_conditioned_on_rows(
curr_dist, att_type, params->dataset_info->nr_target_values);
response = prior - posterior;
}
}
return response;
}
template <typename T>
__device__ T GpuErt<T>::varianceCalculation(
GpuDteAlgorithmShared::GpuParams<T> *params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> &node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> &tmp_node, T *curr_dist) {
__shared__ T s_means[2];
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int attribute = tmp_node.tmp_attribute;
int inst;
T val;
if (threadIdx.x < 2) {
s_means[threadIdx.x] = 0;
curr_dist[threadIdx.x] = 0;
}
__syncthreads();
// Calculate mean values from split
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
params->dataset);
int t = node.node_index_count;
if (val != -flt_max && t > 0) {
GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 0 : 1], 1);
GpuDte<T>::AtomicAdd(&s_means[(val < tmp_node.tmp_split) ? 0 : 1],
params->target_data[inst]);
}
}
__syncthreads();
T gain = 0;
if (threadIdx.x == 0) {
for (int i = 0; i < 2; ++i) {
if (curr_dist[i] == 0) gain = -flt_max;
}
if (gain != -flt_max) {
curr_dist[2] = (curr_dist[0] != 0) ? s_means[0] / T(curr_dist[0]) : 0;
curr_dist[3] = (curr_dist[1] != 0) ? s_means[1] / T(curr_dist[1]) : 0;
T nLeft = curr_dist[0] == 0 ? 1 : curr_dist[0];
T nRight = curr_dist[1] == 0 ? 1 : curr_dist[1];
T diff = ((s_means[0] / nLeft) - (s_means[1] / nRight));
gain = (nLeft * nRight * diff * diff / (nLeft + nRight));
}
}
return gain;
}
template GpuErt<float>::GpuErt();
template GpuErt<double>::GpuErt();
} | 43864d5e313418327c0db3c9fd03d1d44735afd5.cu | #define DLLExport
#define TestExport
#include "../../source_shared/include/global_defines.h"
#include "../../lib_core/include/lib_core.h"
#include "gpuert.h"
namespace lib_ensembles {
template <typename T>
__global__ void host_kernel(GpuErt<T> *gpu_algo,
GpuDteAlgorithmShared::GpuParams<T> params,
GpuDteAlgorithmShared::GpuDteKernelId id) {
gpu_algo->GetConstPointers(¶ms.iteration_info, ¶ms.dataset_info,
¶ms.static_info);
switch (id) {
case GpuDteAlgorithmShared::kSetupKernel:
gpu_algo->gpuert_setup_kernel(¶ms, 123512);
break;
case GpuDteAlgorithmShared::kInitTreeBatch:
gpu_algo->gpuert_initialize_tree_batch(¶ms);
break;
case GpuDteAlgorithmShared::kFindSplit:
gpu_algo->gpuert_find_split(¶ms);
break;
case GpuDteAlgorithmShared::kPerformSplit:
gpu_algo->gpuert_perform_split(¶ms);
break;
case GpuDteAlgorithmShared::kPredict:
gpu_algo->gpuert_predict(¶ms);
break;
default:
break;
}
}
template <typename T>
void GpuErt<T>::CallCudaKernel(int blocks, int block_size,
GpuDteAlgorithmShared::GpuParams<T> ¶ms,
GpuDteAlgorithmShared::GpuDteKernelId id) {
host_kernel<T><<<blocks, block_size>>>(this, params, id);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_setup_kernel(
GpuDteAlgorithmShared::GpuParams<T> *params, unsigned long long seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread gets same seed, a different sequence number, no offset
if (id < params->iteration_info->threads_launched)
curand_init(seed, id, 0, ¶ms->random_states[id]);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_initialize_tree_batch(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ int s_indexCursor;
if (threadIdx.x == 0) {
if (params->static_info->balanced_sampling &&
params->dataset_info->data_type == type_classification_)
s_indexCursor = 0;
else
s_indexCursor = params->dataset_info->nr_instances;
}
__syncthreads();
int treeOffset = params->dataset_info->nr_instances * blockIdx.x;
if (params->dataset_info->data_type == type_classification_ &&
params->static_info->balanced_sampling) {
// Initialize indices main buffer
int localCursor;
int randVal;
int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size;
curandStateMRG32k3a localState = params->random_states[stateId];
for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
int targetStart = params->target_starts[i];
int targetEnd = (i == params->dataset_info->nr_target_values - 1)
? params->dataset_info->nr_instances - 1
: params->target_starts[i + 1] - 1;
for (int ii = threadIdx.x;
ii < params->dataset_info->nr_instances /
params->dataset_info->nr_target_values;
ii += blockDim.x) {
localCursor = GpuDte<T>::AtomicAdd(&s_indexCursor, 1);
if (targetEnd - targetStart > 0)
randVal =
targetStart + curand(&localState) % (targetEnd - targetStart);
else
randVal = targetStart;
params->indices_buffer[0][treeOffset + localCursor] = randVal;
}
}
} else {
// Initialize indices main buffer
for (int i = threadIdx.x; i < params->dataset_info->nr_instances;
i += blockDim.x)
params->indices_buffer[0][treeOffset + i] = i;
}
__syncthreads();
if (threadIdx.x == 0) {
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> root;
root.parent_id = -2;
root.attribute = -2;
root.split_point = -2;
root.tracking_id = GpuDte<T>::AtomicAdd(¶ms->node_cursors[node_id_], 1);
root.node_index_start = treeOffset;
root.node_index_count = s_indexCursor;
params->node_buffers[params->iteration_info->read_buffer_id][blockIdx.x] =
root;
}
}
template <typename T>
__device__ void GpuErt<T>::gpuert_find_split(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ T s_dynamic_shared[40];
__shared__ GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> s_tree_node;
__shared__ GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> s_tmp_node;
__shared__ int s_attribute_type;
__shared__ bool s_sensible_split;
curandStateMRG32k3a localState;
localState = params->random_states[(blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size];
if (threadIdx.x == 0) {
s_tree_node =
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node =
params
->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node.tmp_score = 0;
s_sensible_split = false;
}
__syncthreads();
bool firstFeature = true;
int k = params->static_info->nr_features;
int max_retries = k - params->dataset_info->nr_attributes < -10
? -10
: k - params->dataset_info->nr_attributes;
while ((k > max_retries) && (k-- > 0 || !s_sensible_split)) {
if (threadIdx.x == 0) {
s_tmp_node.tmp_attribute =
curand(&localState) % params->dataset_info->nr_attributes;
s_tmp_node.tmp_split = 0;
s_attribute_type = params->attribute_type[s_tmp_node.tmp_attribute];
s_attribute_type =
s_attribute_type >= max_nominal_ ? 2 : s_attribute_type;
}
for (int i = threadIdx.x;
i < params->dataset_info->nr_target_values * max_nominal_;
i += blockDim.x)
s_dynamic_shared[i] = 0;
__syncthreads();
if (threadIdx.x < 10) {
T dat = get_data_point(
s_tmp_node.tmp_attribute,
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[(curand(&localState) %
s_tree_node.node_index_count) +
s_tree_node.node_index_start],
params->dataset_info->nr_instances, params->dataset);
GpuDte<T>::AtomicAdd(&s_tmp_node.tmp_split, dat);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tmp_node.tmp_split /= 10.0f;
}
__syncthreads();
T response;
switch (params->dataset_info->data_type) {
case type_classification_:
response = eval_numeric_attribute(params, s_tree_node, s_tmp_node,
s_dynamic_shared, s_attribute_type);
break;
case type_regression_:
response = varianceCalculation(params, s_tree_node, s_tmp_node,
s_dynamic_shared);
break;
}
if (threadIdx.x == 0) {
if (s_tmp_node.tmp_score < response || firstFeature) {
// Save splitpoint, attribute and distribution
s_tmp_node.tmp_score = response;
s_tree_node.split_point = s_tmp_node.tmp_split;
s_tree_node.attribute = s_tmp_node.tmp_attribute;
switch (params->dataset_info->data_type) {
case type_classification_:
for (int i = 0;
i < params->dataset_info->nr_target_values * max_nominal_; ++i)
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
i] = s_dynamic_shared[i];
break;
case type_regression_:
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_] = s_dynamic_shared[2];
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
1] = s_dynamic_shared[3];
break;
}
}
if (s_tmp_node.tmp_score > 1e-3) s_sensible_split = true;
firstFeature = false;
}
__syncthreads();
}
params->random_states[(blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size] = localState;
// Copy back result
if (threadIdx.x == 0) {
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset] =
s_tree_node;
params->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset] =
s_tmp_node;
// params->random_states[blockIdx.x] = localState;
}
}
template <typename T>
__device__ void GpuErt<T>::gpuert_perform_split(
GpuDteAlgorithmShared::GpuParams<T> *params) {
__shared__ int s_node_counts[40];
gpudte_perform_split(*(params->static_info), *(params->dataset_info),
*(params->iteration_info), params->probability_buffers,
params->probability_tmp_buffer, params->dataset,
params->attribute_type, s_node_counts,
params->indices_buffer, params->node_cursors,
params->node_buffers);
}
template <typename T>
__device__ void GpuErt<T>::gpuert_predict(
GpuDteAlgorithmShared::GpuParams<T> *params) {
if (threadIdx.x + blockIdx.x * blockDim.x >=
params->iteration_info->threads_launched)
return;
int tid = params->iteration_info->tree_offset + threadIdx.x +
blockIdx.x * blockDim.x;
gpudte_predict(
tid, params->dataset_info->nr_instances, params->dataset_info->data_type,
params->dataset_info->nr_target_values, params->node_buffers_classify,
params->dataset, params->probability_tmp_buffer, params->predictions,
params->attribute_type);
}
template <typename T>
__device__ T GpuErt<T>::eval_numeric_attribute(
GpuDteAlgorithmShared::GpuParams<T> *params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> &node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> &tmp_node, T *curr_dist,
int att_type) {
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int weight = 1;
int inst;
T val;
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
if (val != -flt_max)
GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values *
((val < tmp_node.tmp_split) ? 0 : 1) +
int(params->target_data[inst])],
weight);
else
GpuDte<T>::AtomicAdd(&curr_dist[int(params->target_data[inst])], weight);
}
__syncthreads();
T response = 0;
if (threadIdx.x == 0) {
int count;
for (int i = 0; i < 2; ++i) {
count = 0;
for (int ii = 0; ii < params->dataset_info->nr_target_values; ++ii) {
count += curr_dist[i * params->dataset_info->nr_target_values + ii];
}
if (count == 0) response = -flt_max;
}
if (response != -flt_max) {
T prior = entropy_over_columns(curr_dist, att_type,
params->dataset_info->nr_target_values);
T posterior = entropy_conditioned_on_rows(
curr_dist, att_type, params->dataset_info->nr_target_values);
response = prior - posterior;
}
}
return response;
}
template <typename T>
__device__ T GpuErt<T>::varianceCalculation(
GpuDteAlgorithmShared::GpuParams<T> *params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> &node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> &tmp_node, T *curr_dist) {
__shared__ T s_means[2];
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int attribute = tmp_node.tmp_attribute;
int inst;
T val;
if (threadIdx.x < 2) {
s_means[threadIdx.x] = 0;
curr_dist[threadIdx.x] = 0;
}
__syncthreads();
// Calculate mean values from split
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
params->dataset);
int t = node.node_index_count;
if (val != -flt_max && t > 0) {
GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 0 : 1], 1);
GpuDte<T>::AtomicAdd(&s_means[(val < tmp_node.tmp_split) ? 0 : 1],
params->target_data[inst]);
}
}
__syncthreads();
T gain = 0;
if (threadIdx.x == 0) {
for (int i = 0; i < 2; ++i) {
if (curr_dist[i] == 0) gain = -flt_max;
}
if (gain != -flt_max) {
curr_dist[2] = (curr_dist[0] != 0) ? s_means[0] / T(curr_dist[0]) : 0;
curr_dist[3] = (curr_dist[1] != 0) ? s_means[1] / T(curr_dist[1]) : 0;
T nLeft = curr_dist[0] == 0 ? 1 : curr_dist[0];
T nRight = curr_dist[1] == 0 ? 1 : curr_dist[1];
T diff = ((s_means[0] / nLeft) - (s_means[1] / nRight));
gain = (nLeft * nRight * diff * diff / (nLeft + nRight));
}
}
return gain;
}
template GpuErt<float>::GpuErt();
template GpuErt<double>::GpuErt();
} |
a6b2f2b89368c5a2387f4716cbd6dd610cd196f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/** YOUR CODE GOES BELOW **/
int num_threads = blockDim.x * gridDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < numEdges; i+=num_threads)
{
if(matches[src[i]] == -1 && matches[dst[i]] == -1)
keepEdges[i] = 1;
else
keepEdges[i] = 0;
}
/** YOUR CODE GOES ABOVE **/
}
| a6b2f2b89368c5a2387f4716cbd6dd610cd196f0.cu | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/** YOUR CODE GOES BELOW **/
int num_threads = blockDim.x * gridDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
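// Grid-stride loop: each thread marks every num_threads-th edge, keeping an
// edge only when both of its endpoints are still unmatched (-1).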
for (int i = tid; i < numEdges; i+=num_threads)
{
if(matches[src[i]] == -1 && matches[dst[i]] == -1)
keepEdges[i] = 1;
else
keepEdges[i] = 0;
}
/** YOUR CODE GOES ABOVE **/
}
|
6b1913318913268ffa911d71e4603ee5da1acb02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6) {
for (int i=0; i < var_1; ++i) {
if (comp > (-1.7096E-37f * var_3)) {
comp += (-1.7265E-41f / -1.5223E-36f);
for (int i=0; i < var_2; ++i) {
comp += (var_4 * (-1.7581E-35f - (var_5 * (var_6 + -1.1925E1f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
hipDeviceSynchronize();
return 0;
}
| 6b1913318913268ffa911d71e4603ee5da1acb02.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6) {
for (int i=0; i < var_1; ++i) {
if (comp > (-1.7096E-37f * var_3)) {
comp += (-1.7265E-41f / -1.5223E-36f);
for (int i=0; i < var_2; ++i) {
comp += (var_4 * (-1.7581E-35f - (var_5 * (var_6 + -1.1925E1f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
cudaDeviceSynchronize();
return 0;
}
|
b2d06b74f8edef3d7aea5c540169f288affbd10b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
// Thread block size
#define BLOCK_SIZE 16
#define ARR_DIM (1024*1024)
#define WIDTH 1024
#define HEIGHT 1024
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
// printf("VAL=%f pos=%d\n", value, row * A.stride + col);
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(Matrix, Matrix, Matrix);
__global__ void cudaRandomize(float *arr){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
arr[row*WIDTH + col] = 1.654981;
}
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size,
hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
// Read C from device memory
hipMemcpy(C.elements, d_C.elements, size,
hipMemcpyDeviceToHost);
for(int i = 0; i < ARR_DIM; i++){
printf("VAL=%f POS=%d\n", C.elements[i], i);
}
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main(){
Matrix A,B,C;
A.width = WIDTH;
A.stride = WIDTH;
A.height = HEIGHT;
B.width=WIDTH;
B.stride = WIDTH;
B.height = HEIGHT;
C.width = WIDTH;
C.stride = WIDTH;
C.height = HEIGHT;
A.elements = (float *)malloc(ARR_DIM*sizeof(float));
B.elements = (float *)malloc(ARR_DIM*sizeof(float));
float *d_A, *d_B;
size_t size = A.width * A.height * sizeof(float);
hipMalloc((void**)&d_A, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
hipLaunchKernelGGL(( cudaRandomize), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A);
hipMemcpy(A.elements, d_A, size, hipMemcpyDeviceToHost);
hipFree(d_A);
size = B.width * B.height * sizeof(float);
hipMalloc((void**)&d_B, size);
dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2(B.width / dimBlock.x, B.height / dimBlock.y);
hipLaunchKernelGGL(( cudaRandomize), dim3(dimGrid2), dim3(dimBlock2), 0, 0, d_B);
hipMemcpy(B.elements, d_B, size, hipMemcpyDeviceToHost);
hipFree(d_B);
C.elements = (float *)malloc(ARR_DIM*sizeof(float));
MatMul(A,B,C);
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
| b2d06b74f8edef3d7aea5c540169f288affbd10b.cu | #include <stdio.h>
#include <math.h>
// Thread block size
#define BLOCK_SIZE 16
#define ARR_DIM (1024*1024)
#define WIDTH 1024
#define HEIGHT 1024
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
// printf("VAL=%f pos=%d\n", value, row * A.stride + col);
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(Matrix, Matrix, Matrix);
__global__ void cudaRandomize(float *arr){
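// Despite its name, this kernel fills every element with the fixed constant
// 1.654981f rather than generating random values.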
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
arr[row*WIDTH + col] = 1.654981;
}
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
for(int i = 0; i < ARR_DIM; i++){
printf("VAL=%f POS=%d\n", C.elements[i], i);
}
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main(){
Matrix A,B,C;
A.width = WIDTH;
A.stride = WIDTH;
A.height = HEIGHT;
B.width=WIDTH;
B.stride = WIDTH;
B.height = HEIGHT;
C.width = WIDTH;
C.stride = WIDTH;
C.height = HEIGHT;
A.elements = (float *)malloc(ARR_DIM*sizeof(float));
B.elements = (float *)malloc(ARR_DIM*sizeof(float));
float *d_A, *d_B;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void**)&d_A, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
cudaRandomize<<<dimGrid, dimBlock>>>(d_A);
cudaMemcpy(A.elements, d_A, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
size = B.width * B.height * sizeof(float);
cudaMalloc((void**)&d_B, size);
dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2(B.width / dimBlock.x, B.height / dimBlock.y);
cudaRandomize<<<dimGrid2, dimBlock2>>>(d_B);
cudaMemcpy(B.elements, d_B, size, cudaMemcpyDeviceToHost);
cudaFree(d_B);
C.elements = (float *)malloc(ARR_DIM*sizeof(float));
MatMul(A,B,C);
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
|
e3d71c9424cb775b2738d8f09be860b6bf567e31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_initialise_chunk_kernel_zero [1][1];
static int dims_initialise_chunk_kernel_zero_h [1][1] = {0};
//user function
__device__
void initialise_chunk_kernel_zero_gpu(ACC<double> &var) {
var(0,0) = 0.0;
}
__global__ void ops_initialise_chunk_kernel_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_initialise_chunk_kernel_zero[0][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_initialise_chunk_kernel_zero[0][0], arg0);
initialise_chunk_kernel_zero_gpu(argp0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_initialise_chunk_kernel_zero_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,5)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
OPS_kernels[5].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 1,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
if (xdim0 != dims_initialise_chunk_kernel_zero_h[0][0]) {
dims_initialise_chunk_kernel_zero_h[0][0] = xdim0;
cutilSafeCall(hipMemcpyToSymbol( dims_initialise_chunk_kernel_zero, dims_initialise_chunk_kernel_zero_h, sizeof(dims_initialise_chunk_kernel_zero)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zero), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0],x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[5].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 5;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 5;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_zero_execute;
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
}
ops_enqueue_kernel(desc);
}
#endif
| e3d71c9424cb775b2738d8f09be860b6bf567e31.cu | //
// auto-generated by ops.py
//
__constant__ int dims_initialise_chunk_kernel_zero [1][1];
static int dims_initialise_chunk_kernel_zero_h [1][1] = {0};
//user function
__device__
void initialise_chunk_kernel_zero_gpu(ACC<double> &var) {
var(0,0) = 0.0;
}
__global__ void ops_initialise_chunk_kernel_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
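// dims_initialise_chunk_kernel_zero[0][0] caches xdim0 (the x-extent of the
// dat), so this advances arg0 to element (idx_x, idx_y) of a row-major block.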
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_initialise_chunk_kernel_zero[0][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_initialise_chunk_kernel_zero[0][0], arg0);
initialise_chunk_kernel_zero_gpu(argp0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_initialise_chunk_kernel_zero_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,5)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
OPS_kernels[5].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 1,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
if (xdim0 != dims_initialise_chunk_kernel_zero_h[0][0]) {
dims_initialise_chunk_kernel_zero_h[0][0] = xdim0;
cutilSafeCall(cudaMemcpyToSymbol( dims_initialise_chunk_kernel_zero, dims_initialise_chunk_kernel_zero_h, sizeof(dims_initialise_chunk_kernel_zero)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_initialise_chunk_kernel_zero<<<grid, tblock >>> ( (double *)p_a[0],x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[5].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 5;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 5;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_zero_execute;
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
}
ops_enqueue_kernel(desc);
}
#endif
|
9ecffb443615d8ca4f545ddf28092a90a2b2c0b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace avro {
namespace gpu {
constexpr int num_warps = 16;
constexpr int max_shared_schema_len = 1000;
/*
* Avro varint encoding - see
* https://avro.apache.org/docs/1.2.0/spec.html#binary_encoding
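*
* Zigzag interleaves sign and magnitude (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...)
* so values of small magnitude encode to few bytes; each varint byte carries 7
* payload bits, least-significant group first, with the high bit set on every
* byte except the last.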
*/
static inline int64_t __device__ avro_decode_zigzag_varint(const uint8_t *&cur, const uint8_t *end)
{
uint64_t u = 0;
if (cur < end) {
u = *cur++;
if (u > 0x7f) {
uint64_t scale = 128;
u &= 0x7f;
while (cur < end) {
uint32_t c = *cur++;
u += (c & 0x7f) * scale;
scale <<= 7;
if (c < 0x80) break;
}
}
}
return (int64_t)((u >> 1u) ^ -(int64_t)(u & 1));
}
/**
* @brief Decode a row of values given an avro schema
*
* @param[in] schema Schema description
* @param[in] schema_g Global schema in device mem
* @param[in] schema_len Number of schema entries
* @param[in] row Current row
* @param[in] max_rows Total number of rows
* @param[in] cur Current input data pointer
* @param[in] end End of input data
* @param[in] global_Dictionary Global dictionary entries
*
* @return data pointer at the end of the row (start of next row)
*/
static const uint8_t *__device__ avro_decode_row(const schemadesc_s *schema,
schemadesc_s *schema_g,
uint32_t schema_len,
size_t row,
size_t max_rows,
const uint8_t *cur,
const uint8_t *end,
device_span<nvstrdesc_s> global_dictionary)
{
uint32_t array_start = 0, array_repeat_count = 0;
int array_children = 0;
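// The schema arrives as a flattened tree: 'skip' bypasses subtrees that are
// not taken (unselected union branches, empty arrays), and compound entries
// (kind >= type_record) add their child count to the skip distance.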
for (uint32_t i = 0; i < schema_len;) {
uint32_t kind = schema[i].kind;
int skip = 0;
if (kind == type_union) {
int skip_after;
if (cur >= end) break;
skip = (*cur++) >> 1; // NOTE: Assumes 1-byte union member
skip_after = schema[i].count - skip - 1;
++i;
while (skip > 0 && i < schema_len) {
if (schema[i].kind >= type_record) { skip += schema[i].count; }
++i;
--skip;
}
if (i >= schema_len || skip_after < 0) break;
kind = schema[i].kind;
skip = skip_after;
}
void *dataptr = schema[i].dataptr;
switch (kind) {
case type_null:
if (dataptr != nullptr && row < max_rows) {
atomicAnd(static_cast<uint32_t *>(dataptr) + (row >> 5), ~(1 << (row & 0x1f)));
atomicAdd(&schema_g[i].count, 1);
}
break;
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: {
int64_t v = avro_decode_zigzag_varint(cur, end);
if (kind == type_int) {
if (dataptr != nullptr && row < max_rows) {
static_cast<int32_t *>(dataptr)[row] = static_cast<int32_t>(v);
}
} else if (kind == type_long) {
if (dataptr != nullptr && row < max_rows) { static_cast<int64_t *>(dataptr)[row] = v; }
} else { // string or enum
size_t count = 0;
const char *ptr = 0;
if (kind == type_enum) { // dictionary
size_t idx = schema[i].count + v;
if (idx < global_dictionary.size()) {
ptr = global_dictionary[idx].ptr;
count = global_dictionary[idx].count;
}
} else if (v >= 0 && cur + v <= end) { // string
ptr = reinterpret_cast<const char *>(cur);
count = (size_t)v;
cur += count;
}
if (dataptr != nullptr && row < max_rows) {
static_cast<nvstrdesc_s *>(dataptr)[row].ptr = ptr;
static_cast<nvstrdesc_s *>(dataptr)[row].count = count;
}
}
} break;
case type_float:
if (dataptr != nullptr && row < max_rows) {
uint32_t v;
if (cur + 3 < end) {
v = unaligned_load32(cur);
cur += 4;
} else {
v = 0;
}
static_cast<uint32_t *>(dataptr)[row] = v;
} else {
cur += 4;
}
break;
case type_double:
if (dataptr != nullptr && row < max_rows) {
uint64_t v;
if (cur + 7 < end) {
v = unaligned_load64(cur);
cur += 8;
} else {
v = 0;
}
static_cast<uint64_t *>(dataptr)[row] = v;
} else {
cur += 8;
}
break;
case type_boolean:
if (dataptr != nullptr && row < max_rows) {
uint8_t v = (cur < end) ? *cur : 0;
static_cast<uint8_t *>(dataptr)[row] = (v) ? 1 : 0;
}
cur++;
break;
case type_array: {
int32_t array_block_count = avro_decode_zigzag_varint(cur, end);
if (array_block_count < 0) {
avro_decode_zigzag_varint(cur, end); // block size in bytes, ignored
array_block_count = -array_block_count;
}
array_start = i;
array_repeat_count = array_block_count;
array_children = 1;
if (array_repeat_count == 0) {
skip += schema[i].count; // Should always be 1
}
} break;
}
if (array_repeat_count != 0) {
array_children--;
if (schema[i].kind >= type_record) { array_children += schema[i].count; }
}
i++;
while (skip > 0 && i < schema_len) {
if (schema[i].kind >= type_record) { skip += schema[i].count; }
++i;
--skip;
}
// If within an array, check if we reached the last item
if (array_repeat_count != 0 && array_children <= 0 && cur < end) {
if (!--array_repeat_count) {
i = array_start; // Restart at the array parent
} else {
i = array_start + 1; // Restart after the array parent
array_children = schema[array_start].count;
}
}
}
return cur;
}
/**
* @brief Decode column data
*
* @param[in] blocks Data block descriptions
* @param[in] schema Schema description
* @param[in] global_Dictionary Global dictionary entries
* @param[in] avro_data Raw block data
* @param[in] num_blocks Number of blocks
* @param[in] schema_len Number of entries in schema
* @param[in] min_row_size Minimum size in bytes of a row
* @param[in] max_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
*/
// blockDim {32,num_warps,1}
extern "C" __global__ void __launch_bounds__(num_warps * 32, 2)
gpuDecodeAvroColumnData(block_desc_s *blocks,
schemadesc_s *schema_g,
device_span<nvstrdesc_s> global_dictionary,
const uint8_t *avro_data,
uint32_t num_blocks,
uint32_t schema_len,
uint32_t min_row_size,
size_t max_rows,
size_t first_row)
{
__shared__ __align__(8) schemadesc_s g_shared_schema[max_shared_schema_len];
__shared__ __align__(8) block_desc_s blk_g[num_warps];
schemadesc_s *schema;
block_desc_s *const blk = &blk_g[threadIdx.y];
uint32_t block_id = blockIdx.x * num_warps + threadIdx.y;
size_t cur_row;
uint32_t rows_remaining;
const uint8_t *cur, *end;
// Fetch schema into shared mem if possible
if (schema_len <= max_shared_schema_len) {
for (int i = threadIdx.y * 32 + threadIdx.x; i < schema_len; i += num_warps * 32) {
g_shared_schema[i] = schema_g[i];
}
__syncthreads();
schema = g_shared_schema;
} else {
schema = schema_g;
}
if (block_id < num_blocks and threadIdx.x == 0) { *blk = blocks[block_id]; }
__syncthreads();
if (block_id >= num_blocks) { return; }
cur_row = blk->first_row;
rows_remaining = blk->num_rows;
cur = avro_data + blk->offset;
end = cur + blk->size;
while (rows_remaining > 0 && cur < end) {
uint32_t nrows;
const uint8_t *start = cur;
if (cur_row > first_row + max_rows) break;
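// If the remaining bytes are exactly rows_remaining fixed-size rows, all 32
// lanes of the warp decode one row each in parallel; otherwise rows are
// variable length, so a single lane decodes one row and the advanced position
// is shared across the warp via the shuffle below.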
if (cur + min_row_size * rows_remaining == end) {
nrows = min(rows_remaining, 32);
cur += threadIdx.x * min_row_size;
} else {
nrows = 1;
}
if (threadIdx.x < nrows) {
cur = avro_decode_row(schema,
schema_g,
schema_len,
cur_row - first_row + threadIdx.x,
max_rows,
cur,
end,
global_dictionary);
}
if (nrows <= 1) {
cur = start + shuffle(static_cast<uint32_t>(cur - start));
} else {
cur = start + nrows * min_row_size;
}
__syncwarp();
cur_row += nrows;
rows_remaining -= nrows;
}
}
/**
* @brief Launches kernel for decoding column data
*
* @param[in] blocks Data block descriptions
* @param[in] schema Schema description
* @param[in] global_dictionary Global dictionary entries
* @param[in] avro_data Raw block data
* @param[in] num_blocks Number of blocks
* @param[in] schema_len Number of entries in schema
* @param[in] max_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
* @param[in] min_row_size Minimum size in bytes of a row
* @param[in] stream CUDA stream to use, default 0
*/
void DecodeAvroColumnData(block_desc_s *blocks,
schemadesc_s *schema,
device_span<nvstrdesc_s> global_dictionary,
const uint8_t *avro_data,
uint32_t num_blocks,
uint32_t schema_len,
size_t max_rows,
size_t first_row,
uint32_t min_row_size,
rmm::cuda_stream_view stream)
{
// num_warps warps per threadblock
dim3 const dim_block(32, num_warps);
// 1 warp per datablock, num_warps datablocks per threadblock
dim3 const dim_grid((num_blocks + num_warps - 1) / num_warps, 1);
hipLaunchKernelGGL(( gpuDecodeAvroColumnData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), blocks,
schema,
global_dictionary,
avro_data,
num_blocks,
schema_len,
min_row_size,
max_rows,
first_row);
}
} // namespace gpu
} // namespace avro
} // namespace io
} // namespace cudf
| 9ecffb443615d8ca4f545ddf28092a90a2b2c0b8.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace avro {
namespace gpu {
constexpr int num_warps = 16;
constexpr int max_shared_schema_len = 1000;
/*
* Avro varint encoding - see
* https://avro.apache.org/docs/1.2.0/spec.html#binary_encoding
*/
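// Zigzag maps signed to unsigned (0->0, -1->1, 1->2, -2->3, ...); each varint byte carries 7
// payload bits, least-significant group first, with the high bit acting as a continuation flag.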
static inline int64_t __device__ avro_decode_zigzag_varint(const uint8_t *&cur, const uint8_t *end)
{
uint64_t u = 0;
if (cur < end) {
u = *cur++;
if (u > 0x7f) {
uint64_t scale = 128;
u &= 0x7f;
while (cur < end) {
uint32_t c = *cur++;
u += (c & 0x7f) * scale;
scale <<= 7;
if (c < 0x80) break;
}
}
}
return (int64_t)((u >> 1u) ^ -(int64_t)(u & 1));
}
/**
* @brief Decode a row of values given an avro schema
*
* @param[in] schema Schema description
* @param[in] schema_g Global schema in device mem
* @param[in] schema_len Number of schema entries
* @param[in] row Current row
* @param[in] max_rows Total number of rows
* @param[in] cur Current input data pointer
* @param[in] end End of input data
 * @param[in] global_dictionary Global dictionary entries
*
* @return data pointer at the end of the row (start of next row)
*/
static const uint8_t *__device__ avro_decode_row(const schemadesc_s *schema,
schemadesc_s *schema_g,
uint32_t schema_len,
size_t row,
size_t max_rows,
const uint8_t *cur,
const uint8_t *end,
device_span<nvstrdesc_s> global_dictionary)
{
uint32_t array_start = 0, array_repeat_count = 0;
int array_children = 0;
for (uint32_t i = 0; i < schema_len;) {
uint32_t kind = schema[i].kind;
int skip = 0;
if (kind == type_union) {
int skip_after;
if (cur >= end) break;
skip = (*cur++) >> 1; // NOTE: Assumes 1-byte union member
skip_after = schema[i].count - skip - 1;
++i;
while (skip > 0 && i < schema_len) {
if (schema[i].kind >= type_record) { skip += schema[i].count; }
++i;
--skip;
}
if (i >= schema_len || skip_after < 0) break;
kind = schema[i].kind;
skip = skip_after;
}
void *dataptr = schema[i].dataptr;
switch (kind) {
case type_null:
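      // A null clears this row's bit in the column's validity mask and is counted in
      // schema_g[i].count.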
if (dataptr != nullptr && row < max_rows) {
atomicAnd(static_cast<uint32_t *>(dataptr) + (row >> 5), ~(1 << (row & 0x1f)));
atomicAdd(&schema_g[i].count, 1);
}
break;
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: {
int64_t v = avro_decode_zigzag_varint(cur, end);
if (kind == type_int) {
if (dataptr != nullptr && row < max_rows) {
static_cast<int32_t *>(dataptr)[row] = static_cast<int32_t>(v);
}
} else if (kind == type_long) {
if (dataptr != nullptr && row < max_rows) { static_cast<int64_t *>(dataptr)[row] = v; }
} else { // string or enum
size_t count = 0;
const char *ptr = 0;
if (kind == type_enum) { // dictionary
size_t idx = schema[i].count + v;
if (idx < global_dictionary.size()) {
ptr = global_dictionary[idx].ptr;
count = global_dictionary[idx].count;
}
} else if (v >= 0 && cur + v <= end) { // string
ptr = reinterpret_cast<const char *>(cur);
count = (size_t)v;
cur += count;
}
if (dataptr != nullptr && row < max_rows) {
static_cast<nvstrdesc_s *>(dataptr)[row].ptr = ptr;
static_cast<nvstrdesc_s *>(dataptr)[row].count = count;
}
}
} break;
case type_float:
if (dataptr != nullptr && row < max_rows) {
uint32_t v;
if (cur + 3 < end) {
v = unaligned_load32(cur);
cur += 4;
} else {
v = 0;
}
static_cast<uint32_t *>(dataptr)[row] = v;
} else {
cur += 4;
}
break;
case type_double:
if (dataptr != nullptr && row < max_rows) {
uint64_t v;
if (cur + 7 < end) {
v = unaligned_load64(cur);
cur += 8;
} else {
v = 0;
}
static_cast<uint64_t *>(dataptr)[row] = v;
} else {
cur += 8;
}
break;
case type_boolean:
if (dataptr != nullptr && row < max_rows) {
uint8_t v = (cur < end) ? *cur : 0;
static_cast<uint8_t *>(dataptr)[row] = (v) ? 1 : 0;
}
cur++;
break;
case type_array: {
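      // Avro encodes an array as a sequence of blocks: a zigzag-encoded item count (a negative
      // count means the block's byte size follows), the items themselves, then a zero count to end.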
int32_t array_block_count = avro_decode_zigzag_varint(cur, end);
if (array_block_count < 0) {
avro_decode_zigzag_varint(cur, end); // block size in bytes, ignored
array_block_count = -array_block_count;
}
array_start = i;
array_repeat_count = array_block_count;
array_children = 1;
if (array_repeat_count == 0) {
skip += schema[i].count; // Should always be 1
}
} break;
}
if (array_repeat_count != 0) {
array_children--;
if (schema[i].kind >= type_record) { array_children += schema[i].count; }
}
i++;
while (skip > 0 && i < schema_len) {
if (schema[i].kind >= type_record) { skip += schema[i].count; }
++i;
--skip;
}
// If within an array, check if we reached the last item
if (array_repeat_count != 0 && array_children <= 0 && cur < end) {
if (!--array_repeat_count) {
i = array_start; // Restart at the array parent
} else {
i = array_start + 1; // Restart after the array parent
array_children = schema[array_start].count;
}
}
}
return cur;
}
/**
* @brief Decode column data
*
* @param[in] blocks Data block descriptions
* @param[in] schema Schema description
 * @param[in] global_dictionary Global dictionary entries
* @param[in] avro_data Raw block data
* @param[in] num_blocks Number of blocks
* @param[in] schema_len Number of entries in schema
* @param[in] min_row_size Minimum size in bytes of a row
* @param[in] max_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
*/
// blockDim {32,num_warps,1}
extern "C" __global__ void __launch_bounds__(num_warps * 32, 2)
gpuDecodeAvroColumnData(block_desc_s *blocks,
schemadesc_s *schema_g,
device_span<nvstrdesc_s> global_dictionary,
const uint8_t *avro_data,
uint32_t num_blocks,
uint32_t schema_len,
uint32_t min_row_size,
size_t max_rows,
size_t first_row)
{
__shared__ __align__(8) schemadesc_s g_shared_schema[max_shared_schema_len];
__shared__ __align__(8) block_desc_s blk_g[num_warps];
schemadesc_s *schema;
block_desc_s *const blk = &blk_g[threadIdx.y];
uint32_t block_id = blockIdx.x * num_warps + threadIdx.y;
size_t cur_row;
uint32_t rows_remaining;
const uint8_t *cur, *end;
// Fetch schema into shared mem if possible
if (schema_len <= max_shared_schema_len) {
for (int i = threadIdx.y * 32 + threadIdx.x; i < schema_len; i += num_warps * 32) {
g_shared_schema[i] = schema_g[i];
}
__syncthreads();
schema = g_shared_schema;
} else {
schema = schema_g;
}
if (block_id < num_blocks and threadIdx.x == 0) { *blk = blocks[block_id]; }
__syncthreads();
if (block_id >= num_blocks) { return; }
cur_row = blk->first_row;
rows_remaining = blk->num_rows;
cur = avro_data + blk->offset;
end = cur + blk->size;
while (rows_remaining > 0 && cur < end) {
uint32_t nrows;
const uint8_t *start = cur;
if (cur_row > first_row + max_rows) break;
if (cur + min_row_size * rows_remaining == end) {
nrows = min(rows_remaining, 32);
cur += threadIdx.x * min_row_size;
} else {
nrows = 1;
}
if (threadIdx.x < nrows) {
cur = avro_decode_row(schema,
schema_g,
schema_len,
cur_row - first_row + threadIdx.x,
max_rows,
cur,
end,
global_dictionary);
}
if (nrows <= 1) {
cur = start + shuffle(static_cast<uint32_t>(cur - start));
} else {
cur = start + nrows * min_row_size;
}
__syncwarp();
cur_row += nrows;
rows_remaining -= nrows;
}
}
/**
* @brief Launches kernel for decoding column data
*
* @param[in] blocks Data block descriptions
* @param[in] schema Schema description
* @param[in] global_dictionary Global dictionary entries
* @param[in] avro_data Raw block data
* @param[in] num_blocks Number of blocks
* @param[in] schema_len Number of entries in schema
* @param[in] max_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
* @param[in] min_row_size Minimum size in bytes of a row
* @param[in] stream CUDA stream to use, default 0
*/
void DecodeAvroColumnData(block_desc_s *blocks,
schemadesc_s *schema,
device_span<nvstrdesc_s> global_dictionary,
const uint8_t *avro_data,
uint32_t num_blocks,
uint32_t schema_len,
size_t max_rows,
size_t first_row,
uint32_t min_row_size,
rmm::cuda_stream_view stream)
{
// num_warps warps per threadblock
dim3 const dim_block(32, num_warps);
// 1 warp per datablock, num_warps datablocks per threadblock
dim3 const dim_grid((num_blocks + num_warps - 1) / num_warps, 1);
gpuDecodeAvroColumnData<<<dim_grid, dim_block, 0, stream.value()>>>(blocks,
schema,
global_dictionary,
avro_data,
num_blocks,
schema_len,
min_row_size,
max_rows,
first_row);
}
} // namespace gpu
} // namespace avro
} // namespace io
} // namespace cudf
|
07556ae8d68420234f3b4601c995c8de554d9776.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * matrix_multiplication.cu - parallel matrix multiplication
 *
 * Both input matrices are assumed to be square (MATRIX_SIZE * MATRIX_SIZE)
*
* @author chenyang li
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#define THREAD_NUM 256
// The matrix size is MATRIX_SIZE * MATRIX_SIZE
#define MATRIX_SIZE 1000
// Arrays are global to avoid allocating large blocks on the stack
float A[MATRIX_SIZE*MATRIX_SIZE], B[MATRIX_SIZE * MATRIX_SIZE], C[MATRIX_SIZE * MATRIX_SIZE];
float *gpu_A, *gpu_B, *gpu_C;
/* Print GPU device properties */
void printDeviceProps(const hipDeviceProp_t *prop) {
printf("Device Name: %s\n", prop->name);
printf("totalGlobalMem: %ld\n", prop->totalGlobalMem);
printf("sharedMemPerBlock: %d\n", prop->sharedMemPerBlock);
printf("regsPerBlock: %d\n", prop->regsPerBlock);
printf("warpSize: %d\n", prop->warpSize);
printf("memPitch: %d\n", prop->memPitch);
printf("maxThreadPerBlock: %d\n", prop->maxThreadsPerBlock);
printf("maxThreadsDim[0-2]: %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]);
printf("maxGridSize[0-2]: %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]);
printf("totalConstMem: %d\n", prop->totalConstMem);
printf("major: %d & minor: %d\n", prop->major, prop->minor);
printf("clockRate: %d\n", prop->clockRate);
printf("textureAlignment: %d\n", prop->textureAlignment);
printf("deviceOverlap: %d\n", prop->deviceOverlap);
printf("multiProcessorCount: %d\n", prop->multiProcessorCount);
}
/* CUDA initialization */
bool initCUDA() {
int count, i;
hipDeviceProp_t prop;
hipGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
for (i = 0; i < count; i++) {
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
hipSetDevice(i);
printDeviceProps(&prop);
return true;
}
/* Fill a matrix with random values (stored as a flat 1D array) */
void generateMatrix(float *mat, int size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
mat[i * size + j] = rand() % 10;
}
}
}
/* Print a matrix */
void printMatrix(float *mat, int size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
printf("%f ", mat[i * size + j]);
}
printf("\n");
}
printf("\n");
}
// Kernel function that implements the matrix multiplication
__global__ static void matrixMultiplication(const float *A, const float *B, float *C, int size) {
// Index of the block this thread belongs to (starting from 0)
const int block_id = blockIdx.x;
// Thread index within the block (starting from 0)
const int thread_id = threadIdx.x;
int i;
int index, row, column;
float s;
// Global index of this thread (not the index within the block)
index = block_id * THREAD_NUM + thread_id;
/* C[row][column] */
row = index / size;
column = index % size;
s = 0.0f;
if (row < size && column < size) {
// A[row][0], A[row][1], A[row][2] ... A[row][size]
// B[0]column], B[1][column], B[2][column] ... B[size][column]
for (i = 0; i < size; i++) {
s += A[row * size + i] * B[i * size + column];
}
C[row * size + column] = s;
}
}
int main(void) {
if (!initCUDA()) {
return 0;
}
const int block_num = (MATRIX_SIZE * MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM;
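    // Ceiling division: enough blocks so that block_num * THREAD_NUM covers all
    // MATRIX_SIZE * MATRIX_SIZE elements of C (one thread per output element)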
int i, j;
/* Generate the input matrices */
generateMatrix(A, MATRIX_SIZE);
generateMatrix(B, MATRIX_SIZE);
/* Allocate device memory */
hipMalloc((void**)&gpu_A, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
hipMalloc((void**)&gpu_B, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
hipMalloc((void**)&gpu_C, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
/* Copy the inputs from host to device */
hipMemcpy(gpu_A, A, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, hipMemcpyHostToDevice);
hipMemcpy(gpu_B, B, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, hipMemcpyHostToDevice);
// Launch the kernel
matrixMultiplication << <block_num, THREAD_NUM, 0 >> > (gpu_A, gpu_B, gpu_C, MATRIX_SIZE);
// Copy the result from device back to host
hipMemcpy(C, gpu_C, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, hipMemcpyDeviceToHost);
/* Free device memory */
hipFree(gpu_A);
hipFree(gpu_B);
hipFree(gpu_C);
/* Optional */
// printMatrix(A, MATRIX_SIZE);
// printMatrix(B, MATRIX_SIZE);
// printMatrix(C, MATRIX_SIZE);
system("pause");
// return 0;
} | 07556ae8d68420234f3b4601c995c8de554d9776.cu | /*
 * matrix_multiplication.cu - parallel matrix multiplication
 *
 * Both input matrices are assumed to be square (MATRIX_SIZE * MATRIX_SIZE)
*
* @author chenyang li
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#define THREAD_NUM 256
// The matrix size is MATRIX_SIZE * MATRIX_SIZE
#define MATRIX_SIZE 1000
// Arrays are global to avoid allocating large blocks on the stack
float A[MATRIX_SIZE*MATRIX_SIZE], B[MATRIX_SIZE * MATRIX_SIZE], C[MATRIX_SIZE * MATRIX_SIZE];
float *gpu_A, *gpu_B, *gpu_C;
/* Print GPU device properties */
void printDeviceProps(const cudaDeviceProp *prop) {
printf("Device Name: %s\n", prop->name);
printf("totalGlobalMem: %ld\n", prop->totalGlobalMem);
printf("sharedMemPerBlock: %d\n", prop->sharedMemPerBlock);
printf("regsPerBlock: %d\n", prop->regsPerBlock);
printf("warpSize: %d\n", prop->warpSize);
printf("memPitch: %d\n", prop->memPitch);
printf("maxThreadPerBlock: %d\n", prop->maxThreadsPerBlock);
printf("maxThreadsDim[0-2]: %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]);
printf("maxGridSize[0-2]: %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]);
printf("totalConstMem: %d\n", prop->totalConstMem);
printf("major: %d & minor: %d\n", prop->major, prop->minor);
printf("clockRate: %d\n", prop->clockRate);
printf("textureAlignment: %d\n", prop->textureAlignment);
printf("deviceOverlap: %d\n", prop->deviceOverlap);
printf("multiProcessorCount: %d\n", prop->multiProcessorCount);
}
/* CUDA initialization */
bool initCUDA() {
int count, i;
cudaDeviceProp prop;
cudaGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
for (i = 0; i < count; i++) {
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
cudaSetDevice(i);
printDeviceProps(&prop);
return true;
}
/* Build a 2D matrix of random values, stored as a flat 1D array */
void generateMatrix(float *mat, int size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
mat[i * size + j] = rand() % 10;
}
}
}
/* Print a matrix */
void printMatrix(float *mat, int size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
printf("%f ", mat[i * size + j]);
}
printf("\n");
}
printf("\n");
}
// Kernel function that implements the matrix multiplication
__global__ static void matrixMultiplication(const float *A, const float *B, float *C, int size) {
// Index of the block this thread belongs to (starting from 0)
const int block_id = blockIdx.x;
// Thread index within the block (starting from 0)
const int thread_id = threadIdx.x;
int i;
int index, row, column;
float s;
// Global index of this thread (not the index within the block)
index = block_id * THREAD_NUM + thread_id;
/* This thread computes C[row][column] */
row = index / size;
column = index % size;
s = 0.0f;
if (row < size && column < size) {
// A[row][0], A[row][1], A[row][2] ... A[row][size]
// B[0]column], B[1][column], B[2][column] ... B[size][column]
for (i = 0; i < size; i++) {
s += A[row * size + i] * B[i * size + column];
}
C[row * size + column] = s;
}
}
int main(void) {
if (!initCUDA()) {
return 0;
}
const int block_num = (MATRIX_SIZE * MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM;
int i, j;
/* Generate the input matrices */
generateMatrix(A, MATRIX_SIZE);
generateMatrix(B, MATRIX_SIZE);
/* Allocate device memory */
cudaMalloc((void**)&gpu_A, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
cudaMalloc((void**)&gpu_B, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
cudaMalloc((void**)&gpu_C, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE);
/* Copy the arrays from host memory to device memory */
cudaMemcpy(gpu_A, A, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_B, B, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, cudaMemcpyHostToDevice);
// Launch the kernel
matrixMultiplication << <block_num, THREAD_NUM, 0 >> > (gpu_A, gpu_B, gpu_C, MATRIX_SIZE);
// Copy the result from device memory back to host memory
cudaMemcpy(C, gpu_C, sizeof(float) * MATRIX_SIZE * MATRIX_SIZE, cudaMemcpyDeviceToHost);
/* Free device memory */
cudaFree(gpu_A);
cudaFree(gpu_B);
cudaFree(gpu_C);
/* Optional */
// printMatrix(A, MATRIX_SIZE);
// printMatrix(B, MATRIX_SIZE);
// printMatrix(C, MATRIX_SIZE);
system("pause");
// return 0;
} |
dfb234b7a904110d0e207845bec0ad3f9f87ee8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256
#ifndef nullptr
#define nullptr 0
#endif
using namespace std;
/////////////////////////////////////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
int*pOut, clock_t* pTime )
{
// Declare a dynamically allocated shared memory buffer
extern __shared__ int sharedData[];
const size_t computeSize =*pDataSize / THREAD_NUM;
const size_t tID = size_t(threadIdx.x );//
const size_t bID = size_t(blockIdx.x );//
int offset = 1; //
// Start timing
if ( tID == 0 ) pTime[bID] =clock( );// pick an arbitrary thread to record the start clock
// Each thread accumulates a partial sum of squares over a strided slice of the input
sharedData[tID] = 0; // extern __shared__ memory is not zero-initialized, so clear it first
for ( size_t i = bID * THREAD_NUM+ tID;
i < DATA_SIZE;
i += BLOCK_NUM * THREAD_NUM )
{
sharedData[tID] += pIn[i] * pIn[i];
}
// Synchronize with the other threads in the block
__syncthreads( );
if ( tID < 128 )sharedData[tID] += sharedData[tID + 128];
__syncthreads( );
if ( tID < 64 )sharedData[tID] += sharedData[tID + 64];
__syncthreads( );
if ( tID < 32 )sharedData[tID] += sharedData[tID + 32];
//__syncthreads( );
if ( tID < 16 )sharedData[tID] += sharedData[tID + 16];
//__syncthreads( );
if ( tID < 8 ) sharedData[tID]+= sharedData[tID + 8];
//__syncthreads( );
if ( tID < 4 ) sharedData[tID]+= sharedData[tID + 4];
//__syncthreads( );
if ( tID < 2 ) sharedData[tID]+= sharedData[tID + 2];
//__syncthreads( );
if ( tID < 1 ) sharedData[tID]+= sharedData[tID + 1];
if ( tID == 0 )// thread 0 writes out the block result and records the clock
{
pOut[bID] = sharedData[0];
pTime[bID + BLOCK_NUM] = clock( );
}
}
bool CUDA_SquareSum( int* pOut,clock_t* pTime,
int* pIn, size_t dataSize )
{
assert( pIn != nullptr );
assert( pOut != nullptr );
int* pDevIn = nullptr;
int* pDevOut = nullptr;
size_t* pDevDataSize = nullptr;
clock_t* pDevTime = nullptr;
// 1
hipError_t cudaStatus = hipSetDevice( 0 );//
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "cudaSetDevice()" );
return false;
}
switch ( true)
{
default:
// 2
cudaStatus = hipMalloc( (void**)&pDevIn,dataSize * sizeof( int) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevDataSize,sizeof( size_t ) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
// 3
cudaStatus = hipMemcpy( pDevIn, pIn, dataSize * sizeof( int ),hipMemcpyHostToDevice );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMemcpy()" );
break;
}
cudaStatus = hipMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), hipMemcpyHostToDevice );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMemcpy()" );
break;
}
// 4
hipLaunchKernelGGL(( Kernel_SquareSum), dim3(BLOCK_NUM), dim3(THREAD_NUM), THREAD_NUM* sizeof( int), 0,
pDevIn, pDevDataSize, pDevOut, pDevTime );
// 5
cudaStatus = hipGetLastError( );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "Kernel execution on the device failed!" );
break;
}
// 6
cudaStatus = hipDeviceSynchronize( );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "A problem occurred while synchronizing with the kernel!" );
break;
}
// 7
cudaStatus = hipMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ),hipMemcpyDeviceToHost );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "Failed to copy the result data from the device to the host!" );
break;
}
cudaStatus = hipMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), hipMemcpyDeviceToHost );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "Failed to copy the timing data from the device to the host!" );
break;
}
// 8
hipFree( pDevIn );
hipFree( pDevOut );
hipFree( pDevDataSize );
hipFree( pDevTime );
return true;
}
// 8
hipFree( pDevIn );
hipFree( pDevOut );
hipFree( pDevDataSize );
hipFree( pDevTime );
return false;
}
void GenerateData( int* pData,size_t dataSize )//
{
assert( pData != nullptr );
for ( size_t i = 0; i <dataSize; i++ )
{
srand( i + 3 );
pData[i] = rand( ) % 100;
}
}
int main( int argc, char** argv )//
{
int* pData = nullptr;
int* pResult = nullptr;
clock_t* pTime = nullptr;
// Use the CUDA allocator for the host-side (pinned) buffers
hipError_t cudaStatus = hipHostMalloc( &pData, DATA_SIZE * sizeof( int ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
cudaStatus = hipHostMalloc( &pResult, BLOCK_NUM * sizeof( int ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
cudaStatus = hipHostMalloc( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
GenerateData( pData, DATA_SIZE );//
CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );//
// Combine the per-block results on the CPU
int totalResult=0;
for ( int i = 0; i < BLOCK_NUM; ++i )
{
totalResult += pResult[i];
}
// Compute the execution time
clock_t startTime = pTime[0];
clock_t endTime = pTime[BLOCK_NUM];
for ( int i = 0; i < BLOCK_NUM; ++i )
{
if ( startTime > pTime[i] )startTime = pTime[i];
if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM];
}
clock_t elapsed = endTime - startTime;
// Check for overflow
char* pOverFlow = nullptr;
if ( totalResult < 0 )pOverFlow = " (overflow)";
else pOverFlow = "";
// Print the benchmark results
printf( "Square sum computed with CUDA: %d%s\nClock ticks elapsed: %d\n",
totalResult, pOverFlow, elapsed );
hipDeviceProp_t prop;
if ( hipGetDeviceProperties(&prop, 0 ) == hipSuccess )
{
float actualTime = float( elapsed ) / float(prop.clockRate );
printf( "Actual execution time: %.2fms\n", actualTime );
printf( "Bandwidth: %.2fMB/s\n",
float( DATA_SIZE * sizeof( int ) >>20 ) * 1000.0f / actualTime );
printf( "GPU device model: %s\n", prop.name );
}
hipHostFree( pData );
hipHostFree( pResult );
hipHostFree( pTime );
return 0;
} | dfb234b7a904110d0e207845bec0ad3f9f87ee8e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256
#ifndef nullptr
#define nullptr 0
#endif
using namespace std;
///////////////////////// Kernel that runs on the device /////////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
int*pOut, clock_t* pTime )
{
// Declare a dynamically allocated shared memory buffer
extern __shared__ int sharedData[];
const size_t computeSize =*pDataSize / THREAD_NUM;
const size_t tID = size_t(threadIdx.x );// thread index within the block
const size_t bID = size_t(blockIdx.x );// block index
int offset = 1; // step size that doubles each reduction round
// Start timing
if ( tID == 0 ) pTime[bID] =clock( );// pick an arbitrary thread to record the start clock
// Each thread accumulates a partial sum of squares over a strided slice of the input
sharedData[tID] = 0; // extern __shared__ memory is not zero-initialized, so clear it first
for ( size_t i = bID * THREAD_NUM+ tID;
i < DATA_SIZE;
i += BLOCK_NUM * THREAD_NUM )
{
sharedData[tID] += pIn[i] * pIn[i];
}
// Synchronize with the other threads in the block
__syncthreads( );
if ( tID < 128 )sharedData[tID] += sharedData[tID + 128];
__syncthreads( );
if ( tID < 64 )sharedData[tID] += sharedData[tID + 64];
__syncthreads( );
if ( tID < 32 )sharedData[tID] += sharedData[tID + 32];
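// From this point down the code relies on implicit warp-synchronous execution (hence the
// commented-out __syncthreads); on GPUs with independent thread scheduling this needs
// __syncwarp() or a volatile shared-memory pointer to remain correct.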
//__syncthreads( );
if ( tID < 16 )sharedData[tID] += sharedData[tID + 16];
//__syncthreads( );
if ( tID < 8 ) sharedData[tID]+= sharedData[tID + 8];
//__syncthreads( );
if ( tID < 4 ) sharedData[tID]+= sharedData[tID + 4];
//__syncthreads( );
if ( tID < 2 ) sharedData[tID]+= sharedData[tID + 2];
//__syncthreads( );
if ( tID < 1 ) sharedData[tID]+= sharedData[tID + 1];
if ( tID == 0 )// thread 0 writes out the block result and records the clock
{
pOut[bID] = sharedData[0];
pTime[bID + BLOCK_NUM] = clock( );
}
}
bool CUDA_SquareSum( int* pOut,clock_t* pTime,
int* pIn, size_t dataSize )
{
assert( pIn != nullptr );
assert( pOut != nullptr );
int* pDevIn = nullptr;
int* pDevOut = nullptr;
size_t* pDevDataSize = nullptr;
clock_t* pDevTime = nullptr;
// 1. Select the device
cudaError_t cudaStatus = cudaSetDevice( 0 );// succeeds as long as an NVIDIA GPU is installed
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "cudaSetDevice() failed!" );
return false;
}
switch ( true)
{
default:
// 2. Allocate device memory
cudaStatus = cudaMalloc( (void**)&pDevIn,dataSize * sizeof( int) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMalloc() failed for the input array on the device!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMalloc() failed for the result array on the device!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevDataSize,sizeof( size_t ) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMalloc() failed for the data size on the device!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMalloc() failed for the timing variables on the device!" );
break;
}
// 3. Copy host data to device memory
cudaStatus = cudaMemcpy( pDevIn, pIn, dataSize * sizeof( int ),cudaMemcpyHostToDevice );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMemcpy() failed while copying the host data array to the device!" );
break;
}
cudaStatus = cudaMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), cudaMemcpyHostToDevice );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "cudaMemcpy() failed while copying the data size to the device!" );
break;
}
// 4. Launch the kernel; the host waits for the device to finish
Kernel_SquareSum<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM* sizeof( int)>>>
( pDevIn, pDevDataSize, pDevOut, pDevTime );
// 5. Check whether the kernel launch reported an error
cudaStatus = cudaGetLastError( );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "Kernel execution on the device failed!" );
break;
}
// 6. Synchronize with the kernel and wait for completion
cudaStatus = cudaDeviceSynchronize( );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "A problem occurred while synchronizing with the kernel!" );
break;
}
// 7. Fetch the results
cudaStatus = cudaMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ),cudaMemcpyDeviceToHost );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "Failed to copy the result data from the device to the host!" );
break;
}
cudaStatus = cudaMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), cudaMemcpyDeviceToHost );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "Failed to copy the timing data from the device to the host!" );
break;
}
// 8. Free device memory
cudaFree( pDevIn );
cudaFree( pDevOut );
cudaFree( pDevDataSize );
cudaFree( pDevTime );
return true;
}
// 8. Free device memory
cudaFree( pDevIn );
cudaFree( pDevOut );
cudaFree( pDevDataSize );
cudaFree( pDevTime );
return false;
}
void GenerateData( int* pData,size_t dataSize )// generate the input data
{
assert( pData != nullptr );
for ( size_t i = 0; i <dataSize; i++ )
{
srand( i + 3 );
pData[i] = rand( ) % 100;
}
}
int main( int argc, char** argv )// program entry point
{
int* pData = nullptr;
int* pResult = nullptr;
clock_t* pTime = nullptr;
// Use the CUDA allocator for the host-side (pinned) buffers
cudaError_t cudaStatus = cudaMallocHost( &pData, DATA_SIZE * sizeof( int ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
cudaStatus = cudaMallocHost( &pResult, BLOCK_NUM * sizeof( int ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
cudaStatus = cudaMallocHost( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "Failed to allocate host memory!" );
return 1;
}
GenerateData( pData, DATA_SIZE );// generate the data with random numbers
CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );// run the square-sum on the GPU
// Combine the per-block results on the CPU
int totalResult=0;
for ( int i = 0; i < BLOCK_NUM; ++i )
{
totalResult += pResult[i];
}
// Compute the execution time
clock_t startTime = pTime[0];
clock_t endTime = pTime[BLOCK_NUM];
for ( int i = 0; i < BLOCK_NUM; ++i )
{
if ( startTime > pTime[i] )startTime = pTime[i];
if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM];
}
clock_t elapsed = endTime - startTime;
// Check for overflow
char* pOverFlow = nullptr;
if ( totalResult < 0 )pOverFlow = " (overflow)";
else pOverFlow = "";
// Print the benchmark results
printf( "Square sum computed with CUDA: %d%s\nClock ticks elapsed: %d\n",
totalResult, pOverFlow, elapsed );
cudaDeviceProp prop;
if ( cudaGetDeviceProperties(&prop, 0 ) == cudaSuccess )
{
float actualTime = float( elapsed ) / float(prop.clockRate );
printf( "Actual execution time: %.2fms\n", actualTime );
printf( "Bandwidth: %.2fMB/s\n",
float( DATA_SIZE * sizeof( int ) >>20 ) * 1000.0f / actualTime );
printf( "GPU device model: %s\n", prop.name );
}
cudaFreeHost( pData );
cudaFreeHost( pResult );
cudaFreeHost( pTime );
return 0;
} |
3212d5451185e9798e18269c217c49647af3dcaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return the next representable floating-point value after the argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute the floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_j0 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
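 // Flatten the 2D thread coordinates into a linear element index (row-major across the grid)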
if (id < n)
{
result[id] = j0(x[id]);
}
} | 3212d5451185e9798e18269c217c49647af3dcaf.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return the next representable floating-point value after the argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute the floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_j0 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = j0(x[id]);
}
} |
1652f53dea58dee74e8410cfc4a1249231d6ae19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void swap(double *dm, unsigned int bit1, unsigned int bit2, unsigned int no_qubits) {
unsigned int addr = threadIdx.x + blockDim.x*blockIdx.x;
if (addr >= (1<<2*no_qubits)) return;
unsigned int bit1_mask = (0x3 << (2*bit1));
unsigned int bit2_mask = (0x3 << (2*bit2));
unsigned int addr2 = ( addr & ~(bit1_mask | bit2_mask)) |
((addr & bit1_mask) << (2*(bit2 - bit1))) |
((addr & bit2_mask) >> (2*(bit2 - bit1)));
double t;
if (addr > addr2) {
t = dm[addr2];
dm[addr2] = dm[addr];
dm[addr] = t;
}
} | 1652f53dea58dee74e8410cfc4a1249231d6ae19.cu | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void swap(double *dm, unsigned int bit1, unsigned int bit2, unsigned int no_qubits) {
unsigned int addr = threadIdx.x + blockDim.x*blockIdx.x;
if (addr >= (1<<2*no_qubits)) return;
unsigned int bit1_mask = (0x3 << (2*bit1));
unsigned int bit2_mask = (0x3 << (2*bit2));
unsigned int addr2 = ( addr & ~(bit1_mask | bit2_mask)) |
((addr & bit1_mask) << (2*(bit2 - bit1))) |
((addr & bit2_mask) >> (2*(bit2 - bit1)));
double t;
if (addr > addr2) {
t = dm[addr2];
dm[addr2] = dm[addr];
dm[addr] = t;
}
} |
22e377549eafb9a8534570904d3cbb4fed2c5c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <iostream>
#include <math.h>
#include <cstdlib>
#define TIMING
#define MIN_SIZE 250000
#define SIZE_INCREMENT 250000
#define MAX_SIZE 10000000
#define SAMPLE_SIZE 1
#ifdef TIMING
double avgCPUTime, avgGPUTime;
double cpuStartTime, cpuEndTime;
#endif // TIMING
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void toPGM(int n, int numb, double* arr) {
double max = 0;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (fabs(arr[(i*n) + j]) > max) {
max = fabs(arr[(i*n) + j]);
}
}
}
FILE *fp;
char name[15];
sprintf(name, "output%04d.pgm", numb);
printf("%s", name);
fp = fopen(name, "w");
fprintf(fp, "%s\n%d %d\n%s\n", "P2", n, n, "255");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
fprintf(fp, "%d ", (int)((arr[(i*n) + j]) / max * 127) + 127);
}
fprintf(fp, "\n");
}
fclose(fp);
}
__host__
__device__
double initialCondition(double x, double y) {
double sigma=0.001;//tight point
//double sigma = 0.1;//wider point
double mu = 0.5;//center
double max = (1.0 / (2.0*M_PI*sigma*sigma))*exp(-0.5*(((0.5 - mu) / sigma)*((0.5 - mu) / sigma) + ((0.5 - mu) / sigma)*((0.5 - mu) / sigma)));
double result = (1.0 / (2.0*M_PI*sigma*sigma))*exp(-0.5*(((x - mu) / sigma)*((x - mu) / sigma) + ((y - mu) / sigma)*((y - mu) / sigma))) / max;
return result;
}
__host__
__device__
double f(int x, int y, int n, double *arr1, double *arr0) {
//Blindly trust that this is right... (standard 2D wave-equation stencil: u_new = 2*u - u_old + 0.01*(sum of 4 neighbours - 4*u))
double ans = (.01*(arr1[(x - 1) + (n*(y))] + arr1[(x + 1) + (n*(y))] + arr1[(x)+(n*(y - 1))] + arr1[(x)+(n*(y + 1))] - (4 * arr1[(x)+(n*(y))])) + ((2 * arr1[(x)+(n*(y))]) - arr0[(x)+(n*(y))]));
return ans;
}
__global__
void wave(int n, double *arr0, double *arr1, double *arr2) {
//int id = threadIdx.x + blockDim.x*blockIdx.x;
int strideX = gridDim.x*blockDim.x;
int strideY = gridDim.y*blockDim.y;
for (int i = threadIdx.x + blockDim.x*blockIdx.x + 1; i < n - 1; i += strideX) {
for (int j = threadIdx.y + blockDim.y*blockIdx.y + 1; j < n - 1; j += strideY) {
arr2[(i*n) + j] = f(j, i, n, arr1, arr0);
}
}
}
__global__
void initForWave(double startX, double endX, int n, double* arr0, double* arr1, double* arr2) {
int strideX = gridDim.x*blockDim.x;
int strideY = gridDim.y*blockDim.y;
for (int i = threadIdx.x + blockDim.x*blockIdx.x + 1; i < n - 1; i += strideX) {
for (int j = threadIdx.y + blockDim.y*blockIdx.y + 1; j < n - 1; j += strideY) {
arr0[(i*n) + j] = initialCondition(((double)j) / (n - 1), ((double)i) / (n - 1));
arr1[(i*n) + j] = arr0[(i*n) + j];
}
}
}
int main(void) {
int output = 1;
hipDeviceReset();
hipEvent_t start, stop;
int n = 1024;
int N = n*n; // 1M elements
int steps = n*500;
int outputIncrement = steps/1000;
hipEventCreate(&start);
hipEventCreate(&stop);
double *arr0; //= new float[N];
double *arr1; //= new float[N];
double *arr2; //= new float[N];
double *temp, *localArr0;
hipEventRecord(start);
localArr0 = (double*)malloc(N * sizeof(double));
hipMalloc(&arr0, N * sizeof(double));
hipMalloc(&arr1, N * sizeof(double));
hipMalloc(&arr2, N * sizeof(double));
/*if (output == 1) {
writeheader(N, steps);
}*/
int threadBlockSize = 1024;
int numThreadBlocks = (n + threadBlockSize - 1) / threadBlockSize;
hipDeviceSynchronize();
initForWave << <numThreadBlocks, threadBlockSize >> > (0.0, 1.0, n, arr0, arr1, arr2);
if (output == 1) {
hipMemcpy((void*)localArr0, (void*)arr0, N * sizeof(double), hipMemcpyDeviceToHost);
toPGM(n, 0, localArr0);
}
// Run kernel on 1M elements on the CPU
for (int i = 0; i < steps; i++) {
wave << <numThreadBlocks, threadBlockSize >> > (n, arr0, arr1, arr2);
hipDeviceSynchronize();
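  // Rotate the three time levels: arr0 <- u(t), arr1 <- u(t+1); the oldest buffer is reused as
  // the scratch target for the next step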
temp = arr0;
arr0 = arr1;
arr1 = arr2;
arr2 = temp;
if (output == 1 && i % outputIncrement == 0) {
hipMemcpy((void*)localArr0, (void*)arr0, N * sizeof(double), hipMemcpyDeviceToHost);
toPGM(n, (i / outputIncrement) + 1, localArr0);
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
//std::cout << maxError << "\nTime used (ms): " << elapsedTime << std::endl;
free(localArr0);
hipFree(arr0);
hipFree(arr1);
hipFree(arr2);
return 0;
}
| 22e377549eafb9a8534570904d3cbb4fed2c5c5d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <iostream>
#include <math.h>
#include <cstdlib>
#define TIMING
#define MIN_SIZE 250000
#define SIZE_INCREMENT 250000
#define MAX_SIZE 10000000
#define SAMPLE_SIZE 1
#ifdef TIMING
double avgCPUTime, avgGPUTime;
double cpuStartTime, cpuEndTime;
#endif // TIMING
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void toPGM(int n, int numb, double* arr) {
double max = 0;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (fabs(arr[(i*n) + j]) > max) {
max = fabs(arr[(i*n) + j]);
}
}
}
FILE *fp;
char name[15];
sprintf(name, "output%04d.pgm", numb);
printf("%s", name);
fp = fopen(name, "w");
fprintf(fp, "%s\n%d %d\n%s\n", "P2", n, n, "255");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
fprintf(fp, "%d ", (int)((arr[(i*n) + j]) / max * 127) + 127);
}
fprintf(fp, "\n");
}
fclose(fp);
}
__host__
__device__
double initialCondition(double x, double y) {
double sigma=0.001;//tight point
//double sigma = 0.1;//wider point
double mu = 0.5;//center
double max = (1.0 / (2.0*M_PI*sigma*sigma))*exp(-0.5*(((0.5 - mu) / sigma)*((0.5 - mu) / sigma) + ((0.5 - mu) / sigma)*((0.5 - mu) / sigma)));
double result = (1.0 / (2.0*M_PI*sigma*sigma))*exp(-0.5*(((x - mu) / sigma)*((x - mu) / sigma) + ((y - mu) / sigma)*((y - mu) / sigma))) / max;
return result;
}
__host__
__device__
double f(int x, int y, int n, double *arr1, double *arr0) {
//Blindly trust that this is right... (standard 2D wave-equation stencil: u_new = 2*u - u_old + 0.01*(sum of 4 neighbours - 4*u))
double ans = (.01*(arr1[(x - 1) + (n*(y))] + arr1[(x + 1) + (n*(y))] + arr1[(x)+(n*(y - 1))] + arr1[(x)+(n*(y + 1))] - (4 * arr1[(x)+(n*(y))])) + ((2 * arr1[(x)+(n*(y))]) - arr0[(x)+(n*(y))]));
return ans;
}
__global__
void wave(int n, double *arr0, double *arr1, double *arr2) {
//int id = threadIdx.x + blockDim.x*blockIdx.x;
int strideX = gridDim.x*blockDim.x;
int strideY = gridDim.y*blockDim.y;
for (int i = threadIdx.x + blockDim.x*blockIdx.x + 1; i < n - 1; i += strideX) {
for (int j = threadIdx.y + blockDim.y*blockIdx.y + 1; j < n - 1; j += strideY) {
arr2[(i*n) + j] = f(j, i, n, arr1, arr0);
}
}
}
__global__
void initForWave(double startX, double endX, int n, double* arr0, double* arr1, double* arr2) {
int strideX = gridDim.x*blockDim.x;
int strideY = gridDim.y*blockDim.y;
for (int i = threadIdx.x + blockDim.x*blockIdx.x + 1; i < n - 1; i += strideX) {
for (int j = threadIdx.y + blockDim.y*blockIdx.y + 1; j < n - 1; j += strideY) {
arr0[(i*n) + j] = initialCondition(((double)j) / (n - 1), ((double)i) / (n - 1));
arr1[(i*n) + j] = arr0[(i*n) + j];
}
}
}
int main(void) {
int output = 1;
cudaDeviceReset();
cudaEvent_t start, stop;
int n = 1024;
int N = n*n; // 1M elements
int steps = n*500;
int outputIncrement = steps/1000;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double *arr0; //= new float[N];
double *arr1; //= new float[N];
double *arr2; //= new float[N];
double *temp, *localArr0;
cudaEventRecord(start);
localArr0 = (double*)malloc(N * sizeof(double));
cudaMalloc(&arr0, N * sizeof(double));
cudaMalloc(&arr1, N * sizeof(double));
cudaMalloc(&arr2, N * sizeof(double));
/*if (output == 1) {
writeheader(N, steps);
}*/
int threadBlockSize = 1024;
int numThreadBlocks = (n + threadBlockSize - 1) / threadBlockSize;
cudaDeviceSynchronize();
initForWave << <numThreadBlocks, threadBlockSize >> > (0.0, 1.0, n, arr0, arr1, arr2);
if (output == 1) {
cudaMemcpy((void*)localArr0, (void*)arr0, N * sizeof(double), cudaMemcpyDeviceToHost);
toPGM(n, 0, localArr0);
}
// Run kernel on 1M elements on the CPU
for (int i = 0; i < steps; i++) {
wave << <numThreadBlocks, threadBlockSize >> > (n, arr0, arr1, arr2);
cudaDeviceSynchronize();
temp = arr0;
arr0 = arr1;
arr1 = arr2;
arr2 = temp;
if (output == 1 && i % outputIncrement == 0) {
cudaMemcpy((void*)localArr0, (void*)arr0, N * sizeof(double), cudaMemcpyDeviceToHost);
toPGM(n, (i / outputIncrement) + 1, localArr0);
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
//std::cout << maxError << "\nTime used (ms): " << elapsedTime << std::endl;
free(localArr0);
cudaFree(arr0);
cudaFree(arr1);
cudaFree(arr2);
return 0;
}
|
4117a82fd1bcc6fbbeed39b3413549cfecd19d93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"../include/gpuConvolution.cuh"
#include<malloc.h>
#define _USE_MATH_DEFINES
#include<math.h>
#include<stdio.h>
// performs separable convolution with specified filter (same filter applied to rows and columns)
void sepConvolve2D(ImMatG * inputMat, double *d_filterKernel, size_t kernelSize, ImMatG * outputMat){
int patchSize = 16;
double * d_temp;
hipMalloc(&d_temp, inputMat->getLength()*sizeof(double));
row_convolve << < dim3(inputMat->cols / patchSize, inputMat->rows / patchSize, 1), dim3(patchSize, patchSize, 1), (patchSize + kernelSize / 2 * 2)*patchSize*sizeof(double) >> >(inputMat->data_d, inputMat->rows, inputMat->cols, d_filterKernel, kernelSize, d_temp);
col_convolve << < dim3(inputMat->cols / patchSize, inputMat->rows / patchSize, 1), dim3(patchSize, patchSize, 1), (patchSize + kernelSize / 2 * 2)*patchSize*sizeof(double) >> >(d_temp, inputMat->rows, inputMat->cols, d_filterKernel, kernelSize, outputMat->data_d);
hipFree(d_temp);
}
// Returns a 1D Gaussian filter kernel as a pointer to a newly allocated array
// kernelSize number of filter taps
// sigma      standard deviation of the Gaussian
double * gaussianKernel1D(int kernelSize, double sigma){
double *kernel = (double*)malloc(kernelSize*sizeof(double));
double sum = 0;
// compute elements
for (int i = 0; i < kernelSize; i++){
double elem = 1 / (sqrt(2 * M_PI)*sigma)*exp(-pow((double)i - kernelSize / 2, 2) / (2 * pow(sigma, 2)));
sum += elem;
kernel[i] = elem;
}
// normalize
for (int i = 0; i < kernelSize; i++){
kernel[i]=kernel[i] / sum;
}
return kernel;
}
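// Example usage (a sketch, not part of the original API): build a 7-tap Gaussian on the host,
// upload it, and run the separable pass. The ImMatG objects `input` and `output` are assumed to
// already exist with equal sizes that are multiples of the 16-pixel patch used by sepConvolve2D.
//   double *h_k = gaussianKernel1D(7, 1.5);
//   double *d_k;
//   hipMalloc(&d_k, 7 * sizeof(double));
//   hipMemcpy(d_k, h_k, 7 * sizeof(double), hipMemcpyHostToDevice);
//   sepConvolve2D(&input, d_k, 7, &output);
//   hipFree(d_k); free(h_k);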
// ====================== KERNEL FUNCTIONS =================================
/*performs row wise convolution on image block*/
__global__ void row_convolve(double * d_inputData, int ROWS, int COLS, double * d_filterKernel, size_t kernelSize, double * d_outputData){
extern __shared__ double buffer[];
int kernelRadius = kernelSize / 2;
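 // Shared buffer layout: blockDim.y rows of (blockDim.x + 2*kernelRadius) pixels; the extra
 // columns hold the left/right apron loaded below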
// load data in shared memory
// indexes for pixel coordinates to be loades in to shared memory.
int colPixIndex = threadIdx.x + blockDim.x*blockIdx.x;
int rowPixIndex = threadIdx.y + blockDim.y* blockIdx.y;
int linPixIndex = colPixIndex + rowPixIndex*COLS;
// load patch to be processed
int linBufIdx = threadIdx.x + kernelRadius + threadIdx.y*(blockDim.x + kernelRadius * 2);
buffer[linBufIdx] = d_inputData[linPixIndex];
// load apron
if (threadIdx.x < kernelRadius){
int idxBuf = linBufIdx-kernelRadius;
if ( colPixIndex-kernelRadius>= 0){
buffer[idxBuf] = d_inputData[linPixIndex-kernelRadius];
}
else{
buffer[idxBuf] = 0;
}
}
if (threadIdx.x >= blockDim.x - kernelRadius){
int idxBuf = linBufIdx + kernelRadius;
if ( colPixIndex + kernelRadius< COLS){
buffer[idxBuf] = d_inputData[linPixIndex+kernelRadius];
}
else{
buffer[idxBuf] = 0;
}
}
__syncthreads();
//// convolve
//
double sum = 0;
for (int i = 0; i < kernelSize; i++){
sum += buffer[linBufIdx+(i-kernelRadius)] * d_filterKernel[kernelSize - i - 1];
}
d_outputData[linPixIndex] = sum;
}
// performs columnwise convolution
__global__ void col_convolve(double * d_inputData, int ROWS, int COLS, double * d_filterKernel, size_t kernelSize, double * d_outputData){
extern __shared__ double buffer[];
int kernelRadius = kernelSize / 2;
// load data in shared memory
// indexes for pixel coordinates to be loades in to shared memory.
int colPixIndex = threadIdx.x + blockDim.x*blockIdx.x;
int rowPixIndex = threadIdx.y + blockDim.y* blockIdx.y;
int linPixIndex = colPixIndex + rowPixIndex*COLS;
// load patch to be processed
int linBufIdx = threadIdx.x+(threadIdx.y+kernelRadius)*blockDim.x;
buffer[linBufIdx] = d_inputData[linPixIndex];
// load apron
if (threadIdx.y < kernelRadius){
int idxBuf = linBufIdx - kernelRadius*blockDim.x;
if (rowPixIndex - kernelRadius >= 0){
buffer[idxBuf] = d_inputData[linPixIndex - kernelRadius*COLS];
}
else{
buffer[idxBuf] = 0;
}
}
if (threadIdx.y >= blockDim.y - kernelRadius){
int idxBuf = linBufIdx + kernelRadius*blockDim.x;
if (rowPixIndex + kernelRadius< ROWS){
buffer[idxBuf] = d_inputData[linPixIndex + kernelRadius*COLS];
}
else{
buffer[idxBuf] = 0;
}
}
__syncthreads();
////// convolve
////
double sum = 0;
for (int i = 0; i < kernelSize; i++){
sum += buffer[linBufIdx + (i - kernelRadius)*blockDim.x] * d_filterKernel[kernelSize - i - 1];
}
d_outputData[linPixIndex] = sum;
}
| 4117a82fd1bcc6fbbeed39b3413549cfecd19d93.cu | #include"../include/gpuConvolution.cuh"
#include<malloc.h>
#define _USE_MATH_DEFINES
#include<math.h>
#include<stdio.h>
// performs separable convolution with specified filter (same filter applied to rows and columns)
void sepConvolve2D(ImMatG * inputMat, double *d_filterKernel, size_t kernelSize, ImMatG * outputMat){
int patchSize = 16;
double * d_temp;
cudaMalloc(&d_temp, inputMat->getLength()*sizeof(double));
row_convolve << < dim3(inputMat->cols / patchSize, inputMat->rows / patchSize, 1), dim3(patchSize, patchSize, 1), (patchSize + kernelSize / 2 * 2)*patchSize*sizeof(double) >> >(inputMat->data_d, inputMat->rows, inputMat->cols, d_filterKernel, kernelSize, d_temp);
col_convolve << < dim3(inputMat->cols / patchSize, inputMat->rows / patchSize, 1), dim3(patchSize, patchSize, 1), (patchSize + kernelSize / 2 * 2)*patchSize*sizeof(double) >> >(d_temp, inputMat->rows, inputMat->cols, d_filterKernel, kernelSize, outputMat->data_d);
cudaFree(d_temp);
}
// Returns a 1D Gaussian filter kernel as a pointer to a newly allocated array
// kernelSize number of filter taps
// sigma      standard deviation of the Gaussian
double * gaussianKernel1D(int kernelSize, double sigma){
double *kernel = (double*)malloc(kernelSize*sizeof(double));
double sum = 0;
// compute elements
for (int i = 0; i < kernelSize; i++){
double elem = 1 / (sqrt(2 * M_PI)*sigma)*exp(-pow((double)i - kernelSize / 2, 2) / (2 * pow(sigma, 2)));
sum += elem;
kernel[i] = elem;
}
// normalize
for (int i = 0; i < kernelSize; i++){
kernel[i]=kernel[i] / sum;
}
return kernel;
}
// ====================== KERNEL FUNCTIONS =================================
/*performs row wise convolution on image block*/
__global__ void row_convolve(double * d_inputData, int ROWS, int COLS, double * d_filterKernel, size_t kernelSize, double * d_outputData){
extern __shared__ double buffer[];
int kernelRadius = kernelSize / 2;
// load data in shared memory
// indexes for pixel coordinates to be loades in to shared memory.
int colPixIndex = threadIdx.x + blockDim.x*blockIdx.x;
int rowPixIndex = threadIdx.y + blockDim.y* blockIdx.y;
int linPixIndex = colPixIndex + rowPixIndex*COLS;
// load patch to be processed
int linBufIdx = threadIdx.x + kernelRadius + threadIdx.y*(blockDim.x + kernelRadius * 2);
buffer[linBufIdx] = d_inputData[linPixIndex];
// load apron
if (threadIdx.x < kernelRadius){
int idxBuf = linBufIdx-kernelRadius;
if ( colPixIndex-kernelRadius>= 0){
buffer[idxBuf] = d_inputData[linPixIndex-kernelRadius];
}
else{
buffer[idxBuf] = 0;
}
}
if (threadIdx.x >= blockDim.x - kernelRadius){
int idxBuf = linBufIdx + kernelRadius;
if ( colPixIndex + kernelRadius< COLS){
buffer[idxBuf] = d_inputData[linPixIndex+kernelRadius];
}
else{
buffer[idxBuf] = 0;
}
}
__syncthreads();
//// convolve
//
double sum = 0;
for (int i = 0; i < kernelSize; i++){
sum += buffer[linBufIdx+(i-kernelRadius)] * d_filterKernel[kernelSize - i - 1];
}
d_outputData[linPixIndex] = sum;
}
// performs columnwise convolution
__global__ void col_convolve(double * d_inputData, int ROWS, int COLS, double * d_filterKernel, size_t kernelSize, double * d_outputData){
extern __shared__ double buffer[];
int kernelRadius = kernelSize / 2;
// load data in shared memory
	// indexes for pixel coordinates to be loaded into shared memory.
int colPixIndex = threadIdx.x + blockDim.x*blockIdx.x;
int rowPixIndex = threadIdx.y + blockDim.y* blockIdx.y;
int linPixIndex = colPixIndex + rowPixIndex*COLS;
// load patch to be processed
int linBufIdx = threadIdx.x+(threadIdx.y+kernelRadius)*blockDim.x;
buffer[linBufIdx] = d_inputData[linPixIndex];
// load apron
if (threadIdx.y < kernelRadius){
int idxBuf = linBufIdx - kernelRadius*blockDim.x;
if (rowPixIndex - kernelRadius >= 0){
buffer[idxBuf] = d_inputData[linPixIndex - kernelRadius*COLS];
}
else{
buffer[idxBuf] = 0;
}
}
if (threadIdx.y >= blockDim.y - kernelRadius){
int idxBuf = linBufIdx + kernelRadius*blockDim.x;
if (rowPixIndex + kernelRadius< ROWS){
buffer[idxBuf] = d_inputData[linPixIndex + kernelRadius*COLS];
}
else{
buffer[idxBuf] = 0;
}
}
__syncthreads();
////// convolve
////
double sum = 0;
for (int i = 0; i < kernelSize; i++){
sum += buffer[linBufIdx + (i - kernelRadius)*blockDim.x] * d_filterKernel[kernelSize - i - 1];
}
d_outputData[linPixIndex] = sum;
}
|
5522047de5f27eeedff5a1cb70adcc1c6db36ca5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void computeFloatSum(const float* __restrict__ sumBuffer, float* result) {
__shared__ float tempBuffer[WORK_GROUP_SIZE];
const unsigned int thread = threadIdx.x;
float sum = 0;
for (unsigned int index = thread; index < SUM_BUFFER_SIZE; index += blockDim.x)
sum += sumBuffer[index];
tempBuffer[thread] = sum;
for (int i = 1; i < WORK_GROUP_SIZE; i *= 2) {
__syncthreads();
if (thread%(i*2) == 0 && thread+i < WORK_GROUP_SIZE)
tempBuffer[thread] += tempBuffer[thread+i];
}
if (thread == 0)
result[SUM_OUTPUT_INDEX] = tempBuffer[0];
}
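// Editorial note (not in the original source): the loop in computeFloatSum above
// (repeated in computeDoubleSum below) is a shared-memory tree reduction. On the
// pass with stride i, every thread whose index is a multiple of 2*i adds
// tempBuffer[thread + i] into tempBuffer[thread], so after ceil(log2(WORK_GROUP_SIZE))
// passes tempBuffer[0] holds the sum of all per-thread partial sums, which thread 0
// then writes to result[SUM_OUTPUT_INDEX].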
extern "C" __global__ void computeDoubleSum(const double* __restrict__ sumBuffer, double* result) {
__shared__ double tempBuffer[WORK_GROUP_SIZE];
const unsigned int thread = threadIdx.x;
double sum = 0;
for (unsigned int index = thread; index < SUM_BUFFER_SIZE; index += blockDim.x)
sum += sumBuffer[index];
tempBuffer[thread] = sum;
for (int i = 1; i < WORK_GROUP_SIZE; i *= 2) {
__syncthreads();
if (thread%(i*2) == 0 && thread+i < WORK_GROUP_SIZE)
tempBuffer[thread] += tempBuffer[thread+i];
}
if (thread == 0)
result[SUM_OUTPUT_INDEX] = tempBuffer[0];
}
extern "C" __global__ void applyPositionDeltas(real4* __restrict__ posq, real4* __restrict__ posqCorrection, mixed4* __restrict__ posDelta) {
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
pos.x += posDelta[index].x;
pos.y += posDelta[index].y;
pos.z += posDelta[index].z;
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
posDelta[index] = make_mixed4(0, 0, 0, 0);
}
}
extern "C" __global__ void generateRandomNumbers(int numValues, float4* __restrict__ random, uint4* __restrict__ seed) {
uint4 state = seed[blockIdx.x*blockDim.x+threadIdx.x];
unsigned int carry = 0;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numValues; index += blockDim.x*gridDim.x) {
// Generate three uniform random numbers.
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
unsigned int k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
unsigned int m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x1 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x2 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x3 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
// Record the values.
random[index] = make_float4(x1, x2, x3, 0.0f);
}
seed[blockIdx.x*blockDim.x+threadIdx.x] = state;
}
| 5522047de5f27eeedff5a1cb70adcc1c6db36ca5.cu | extern "C" __global__ void computeFloatSum(const float* __restrict__ sumBuffer, float* result) {
__shared__ float tempBuffer[WORK_GROUP_SIZE];
const unsigned int thread = threadIdx.x;
float sum = 0;
for (unsigned int index = thread; index < SUM_BUFFER_SIZE; index += blockDim.x)
sum += sumBuffer[index];
tempBuffer[thread] = sum;
for (int i = 1; i < WORK_GROUP_SIZE; i *= 2) {
__syncthreads();
if (thread%(i*2) == 0 && thread+i < WORK_GROUP_SIZE)
tempBuffer[thread] += tempBuffer[thread+i];
}
if (thread == 0)
result[SUM_OUTPUT_INDEX] = tempBuffer[0];
}
extern "C" __global__ void computeDoubleSum(const double* __restrict__ sumBuffer, double* result) {
__shared__ double tempBuffer[WORK_GROUP_SIZE];
const unsigned int thread = threadIdx.x;
double sum = 0;
for (unsigned int index = thread; index < SUM_BUFFER_SIZE; index += blockDim.x)
sum += sumBuffer[index];
tempBuffer[thread] = sum;
for (int i = 1; i < WORK_GROUP_SIZE; i *= 2) {
__syncthreads();
if (thread%(i*2) == 0 && thread+i < WORK_GROUP_SIZE)
tempBuffer[thread] += tempBuffer[thread+i];
}
if (thread == 0)
result[SUM_OUTPUT_INDEX] = tempBuffer[0];
}
extern "C" __global__ void applyPositionDeltas(real4* __restrict__ posq, real4* __restrict__ posqCorrection, mixed4* __restrict__ posDelta) {
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
pos.x += posDelta[index].x;
pos.y += posDelta[index].y;
pos.z += posDelta[index].z;
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
posDelta[index] = make_mixed4(0, 0, 0, 0);
}
}
extern "C" __global__ void generateRandomNumbers(int numValues, float4* __restrict__ random, uint4* __restrict__ seed) {
uint4 state = seed[blockIdx.x*blockDim.x+threadIdx.x];
unsigned int carry = 0;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numValues; index += blockDim.x*gridDim.x) {
// Generate three uniform random numbers.
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
unsigned int k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
unsigned int m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x1 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x2 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
state.x = state.x * 69069 + 1;
state.y ^= state.y << 13;
state.y ^= state.y >> 17;
state.y ^= state.y << 5;
k = (state.z >> 2) + (state.w >> 3) + (carry >> 2);
m = state.w + state.w + state.z + carry;
state.z = state.w;
state.w = m;
carry = k >> 30;
float x3 = (float)max(state.x + state.y + state.w, 0x00000001u) / (float)0xffffffff;
// Record the values.
random[index] = make_float4(x1, x2, x3, 0.0f);
}
seed[blockIdx.x*blockDim.x+threadIdx.x] = state;
}
|
8604c466ec5249a789f76e2a1b3d2b633e2b5f47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @file creorderDataAndFindStartCell.cuh
* @author Kamil Szewc ([email protected])
* @since 14-12-2014
*/
#include "../sph.h"
/**
* @brief Creates array of cell start indices, cell end indices
* @param[out] cellStart Array of cell start indices
* @param[out] cellEnd Array of cell end indices
* @param[out] pSort Not used anymore
* @param[in] gridParticleHash Sorted array of grid hashes
* @param[in] gridParticleIndex Sorted array of particle indices
* @param[in] p Not used anymore
* @param[in] numParticles Number of particles
*/
__global__ void reorderDataAndFindCellStart(
uint *cellStart, // output: cell start index
uint *cellEnd, // output: cell end index
Particle *pSort, // output: reordered particle array
uint *gridParticleHash, // input: sorted grid hashes
uint *gridParticleIndex,// input: sorted particle indices
Particle *p, // input: particle array
uint numParticles) // input: number of particles
{
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = blockIdx.x*blockDim.x + threadIdx.x;
uint hash;
if (index < numParticles)
{
hash = gridParticleHash[index];
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0)
{
sharedHash[0] = gridParticleHash[index - 1];
}
}
__syncthreads();
if (index < numParticles)
{
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
{
cellEnd[sharedHash[threadIdx.x]] = index;
}
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
}
}
| 8604c466ec5249a789f76e2a1b3d2b633e2b5f47.cu | /*
* @file creorderDataAndFindStartCell.cuh
* @author Kamil Szewc ([email protected])
* @since 14-12-2014
*/
#include "../sph.h"
/**
* @brief Creates array of cell start indices, cell end indices
* @param[out] cellStart Array of cell start indices
* @param[out] cellEnd Array of cell end indices
* @param[out] pSort Not used anymore
* @param[in] gridParticleHash Sorted array of grid hashes
* @param[in] gridParticleIndex Sorted array of particle indices
* @param[in] p Not used anymore
* @param[in] numParticles Number of particles
*/
__global__ void reorderDataAndFindCellStart(
uint *cellStart, // output: cell start index
uint *cellEnd, // output: cell end index
Particle *pSort, // output: reordered particle array
uint *gridParticleHash, // input: sorted grid hashes
uint *gridParticleIndex,// input: sorted particle indices
Particle *p, // input: particle array
uint numParticles) // input: number of particles
{
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = blockIdx.x*blockDim.x + threadIdx.x;
uint hash;
if (index < numParticles)
{
hash = gridParticleHash[index];
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0)
{
sharedHash[0] = gridParticleHash[index - 1];
}
}
__syncthreads();
if (index < numParticles)
{
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
{
cellEnd[sharedHash[threadIdx.x]] = index;
}
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
}
}
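// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): typical device-side use of the
// cellStart/cellEnd arrays built above. It assumes cellStart was initialised to the
// sentinel 0xffffffff before the kernel ran, since empty cells are never written here.
//
// __device__ void visitCell(uint cellHash, const uint *cellStart, const uint *cellEnd,
//                           const uint *gridParticleIndex)
// {
//     uint first = cellStart[cellHash];
//     if (first == 0xffffffffu) return;               // empty cell
//     uint last = cellEnd[cellHash];                  // one past the last sorted index
//     for (uint i = first; i < last; i++) {
//         uint originalIndex = gridParticleIndex[i];  // map back to the unsorted order
//         // ... interact with particle `originalIndex` ...
//     }
// }
// ---------------------------------------------------------------------------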
|
ed6e771c0aaaec03ee42908496d6828e1f563ba2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdint>
#include <stdio.h>
#include <vector>
#include <assert.h>
#include "GlobalHeader.h"
#define FULL_MASK 0xffffffff
#define HALF_MASK 0x0f0f0f0f
#define QUARTER_MASK 0x03030303
#define SINGULAR_MASK 0x80808080
#define N_THREADS 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ __constant__
embedV_t* c_model;
__device__ __constant__
embed_t* c_norms;
// rows determined as the number of rows in a block
// A is query vector, B is the model ( rows ), C is output matrix
__global__ void DotProduct
(const int limit, const embed_t* A, embed_t* C, unsigned int* pos, const embed_t normA) {
__shared__ embed_t fastA[numEmbeds];
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x < numEmbeds) {
		fastA[threadIdx.x] = A[threadIdx.x]; // only one embedding is in A
}
__syncthreads();
if (id < limit) {
embed_t acum = 0;
const unsigned int row = id / 8; // Get row
const unsigned int interiorId = threadIdx.x % 8; // Get id within row
for (unsigned int i = interiorId; i < numEmbeds; i += 8) {
acum += fastA[i] * c_model[row].data[i]; // Accumulate within the accumulator
}
acum += __shfl_down_sync(FULL_MASK, acum, 4); // Reduction
acum += __shfl_down_sync(HALF_MASK, acum, 2); // Reduction
acum += __shfl_down_sync(QUARTER_MASK, acum, 1); // Reduction
if (interiorId == 0) { // Final step and write results
C[row] = acum / (normA * c_norms[row]);
pos[row] = row;
}
}
}
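// Editorial note (not in the original source): each model row is handled by a group of
// eight consecutive threads; every thread strides through the numEmbeds components
// eight apart, and the __shfl_down_sync reductions with offsets 4, 2 and 1 fold the
// eight partial sums into the first lane of each group, which then writes the cosine
// similarity (dot product divided by the two norms) and the row index.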
__global__ void FirstMerge
(const int64_t N, embed_t *sims, unsigned int* pos, const int64_t length, const int64_t pad) {
const int64_t id = blockIdx.x * blockDim.x + threadIdx.x;
const int64_t start=id*N;
const int64_t end=start+N;
if (start<length) {
// Insertion sort, as N SHOULD be small
for(int64_t i=start+1; i<end; i++)
{
if (i<length){
/*if (i >= pad || i < 0) {
printf("ERRORR1 %i\n", i);
}*/
const embed_t temp=sims[i];
const int64_t position=pos[i];
int64_t j=i-1;
while((j>=start) && (temp>sims[j]) )
{
sims[j+1]=sims[j];
pos[j+1]=pos[j];
j=j-1;
/*if (j >= pad || j < -1) {
printf("ERRORR3 %i\n", j);
}*/
}
sims[(j+1)]=temp;
pos[(j+1)]=position;
}
else if (i<pad) {
for (int64_t i=0;i<N;++i) {
/*if (id+i >= pad || id+i < -1) {
printf("ERRORR4 %i\n", i);
}*/
sims[id+i]=0;
pos[id+i]=0;
}
}
}
}
}
__global__ void BotchedMergeSort
(const int N, embed_t *sims, unsigned int* pos, const unsigned long stride) {
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
id=id*N;
unsigned int posA=0,posB=0;
if (id<stride) {
unsigned int buffPos[maxN];
embed_t buffSims[maxN];
embed_t elemA=sims[(id+stride)];
unsigned int posAuxA=pos[(id+stride)];
embed_t elemB=sims[id];
unsigned int posAuxB=pos[id];
for(unsigned int i=0;i<N;++i) {
if (posAuxA==posAuxB) {
++posA;
elemA=sims[(id+posA+stride)];
posAuxA=pos[(id+posA+stride)];
}
if (elemA>elemB && posA<N) {
++posA;
buffSims[i]=elemA;
buffPos[i]=posAuxA;
elemA=sims[(id+posA+stride)];
posAuxA=pos[(id+posA+stride)];
}
else {
++posB;
buffSims[i]=elemB;
buffPos[i]=posAuxB;
elemB=sims[id+posB];
posAuxB=pos[id+posB];
}
}
memcpy(sims + id, buffSims, N * sizeof(embed_t));
memcpy(pos + id, buffPos, N * sizeof(unsigned int));
}
}
embed_t *A_d;
embed_t *C_d;
unsigned int *positions, *pos_d;
// FUNCTIONS DEFINED IN CUDAHELP.CU
extern "C"
void reservePinnedMemory(embed_t* &ptr, size_t bytes);
extern "C"
void freePinnedMemory(void* ptr);
// Load memory into cuda constants. This memory will be freed automatically at the end of the cuda context
extern "C"
void loadModel(embed_t * norms, embedV_t * model, uint32_t numRows, uint32_t N)
{
assert(N <= maxN);
fprintf(stdout, "Reserving memory for %i rows, and N %i\n", numRows, N);
const size_t numBytesModel = sizeof(embedV_t) * numRows;
const size_t numBytesNorms = sizeof(embed_t) * numRows;
unsigned int numRowsMod=numRows;
if (numRows%N!=0) numRowsMod=(N-numRows%N)+numRows;
numRowsMod+=numRowsMod%2*N;
const unsigned int numBytesQuery = sizeof(embedV_t);
const unsigned int numBytesSims = sizeof(unsigned int) * numRowsMod;
embedV_t* modelSym;
embed_t* normsSym;
gpuErrchk(hipMalloc((embed_t**)&modelSym, numBytesModel));
gpuErrchk(hipMalloc((embed_t**)&normsSym, numBytesNorms));
gpuErrchk(hipMemcpy(modelSym, model, numBytesModel, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(normsSym, norms, numBytesNorms, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpyToSymbol(c_model, (void**)&modelSym, sizeof(modelSym)));
gpuErrchk(hipMemcpyToSymbol(c_norms, (void**)&normsSym, sizeof(normsSym)));
gpuErrchk(hipMalloc((embed_t**)&A_d, numBytesQuery));
gpuErrchk(hipMalloc((embed_t**)&C_d, numBytesSims));
gpuErrchk(hipMalloc((unsigned int**)&pos_d, numBytesSims));
{
embed_t* tmp;
static_assert(sizeof(embed_t) == sizeof(unsigned int), "the embed type needs to be of 4 bytes");
reservePinnedMemory(tmp, sizeof(embed_t) * numRowsMod);
positions = reinterpret_cast<unsigned int*>(tmp);
}
gpuErrchk(hipDeviceSynchronize());// Comment this on release
}
extern "C"
void freeModel()
{
gpuErrchk(hipFree(A_d));
gpuErrchk(hipFree(C_d));
gpuErrchk(hipFree(pos_d));
freePinnedMemory(positions);
embed_t* sym;
gpuErrchk(hipMemcpyFromSymbol(&sym, c_model, sizeof(embed_t*)));
gpuErrchk(hipFree(sym));
gpuErrchk(hipMemcpyFromSymbol(&sym, c_norms, sizeof(embed_t*)));
gpuErrchk(hipFree(sym));
}
// MAIN FUNCTION TO RUN
extern "C"
void runCuda(uint32_t numRows, embedV_t queryTerm, embed_t normA, uint32_t N, int &returnCode, std::vector<unsigned int> &res)
{
assert(N <= maxN);
unsigned int nBlocks=(numRows/128)+1;
unsigned int nBlocksOriginal=nBlocks;
float elapsedTime;
unsigned int numRowsMod=numRows;
if (numRows%N!=0) numRowsMod=(N-numRows%N)+numRows;
numRowsMod+=numRowsMod%2*N;
hipEvent_t start, stop;
const unsigned int numBytesQuery = sizeof(embedV_t);
// const unsigned int numBytesSims = sizeof(unsigned int) * numRowsMod;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipMemcpyAsync(A_d, queryTerm.data, numBytesQuery, hipMemcpyHostToDevice));
gpuErrchk(hipEventRecord(start, 0));
hipLaunchKernelGGL(( DotProduct), dim3(nBlocks), dim3(N_THREADS) , 0, 0, numRows*8, A_d, C_d, pos_d,normA);
gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize());// Comment this on release
hipLaunchKernelGGL(( FirstMerge), dim3(nBlocks), dim3(N_THREADS) , 0, 0, N,C_d,pos_d,numRows,numRowsMod);
gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize());// Comment this on release
unsigned long toReduce=((numRowsMod/N)/2);
while(toReduce>0) {
nBlocks=((toReduce*N)/ N_THREADS)+1;
hipLaunchKernelGGL(( BotchedMergeSort) , dim3(nBlocks), dim3(N_THREADS) , 0, 0, N, C_d, pos_d, toReduce * N);
gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize()); // Comment this on release
if (toReduce>1){
toReduce+=toReduce%2;
}
toReduce=toReduce/2;
}
	// Because we don't use the similarities right now...
// gpuErrchk(hipMemcpyAsync(similarities, C_d, sizeof(embed_t)*N, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpyAsync(positions, pos_d, sizeof(unsigned int)*N, hipMemcpyDeviceToHost));
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "ERROR: %s \n", hipGetErrorString(error));
returnCode=1;
}
gpuErrchk(hipEventRecord(stop, 0));
gpuErrchk(hipEventSynchronize(stop));
gpuErrchk(hipEventElapsedTime(&elapsedTime, start, stop));
printf("\nSimilarities\n");
printf("Vector Size: %d\n", numRows);
printf("nThreads: %d\n", N_THREADS);
printf("nBlocks: %d\n", nBlocksOriginal+1);
printf("Total Time with Events %4.6f ms\n", elapsedTime);
printf("Bandwidth %4.3f GB/s\n", (numRows *numEmbeds* sizeof(float)) / (1000000 * elapsedTime));
res.resize(N);
for (unsigned int i=0;i<N;++i) {
res[i] = positions[i];
}
}
| ed6e771c0aaaec03ee42908496d6828e1f563ba2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdint>
#include <stdio.h>
#include <vector>
#include <assert.h>
#include "GlobalHeader.h"
#define FULL_MASK 0xffffffff
#define HALF_MASK 0x0f0f0f0f
#define QUARTER_MASK 0x03030303
#define SINGULAR_MASK 0x80808080
#define N_THREADS 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ __constant__
embedV_t* c_model;
__device__ __constant__
embed_t* c_norms;
// rows determined as the number of rows in a block
// A is query vector, B is the model ( rows ), C is output matrix
__global__ void DotProduct
(const int limit, const embed_t* A, embed_t* C, unsigned int* pos, const embed_t normA) {
__shared__ embed_t fastA[numEmbeds];
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x < numEmbeds) {
		fastA[threadIdx.x] = A[threadIdx.x]; // only one embedding is in A
}
__syncthreads();
if (id < limit) {
embed_t acum = 0;
const unsigned int row = id / 8; // Get row
const unsigned int interiorId = threadIdx.x % 8; // Get id within row
for (unsigned int i = interiorId; i < numEmbeds; i += 8) {
acum += fastA[i] * c_model[row].data[i]; // Accumulate within the accumulator
}
acum += __shfl_down_sync(FULL_MASK, acum, 4); // Reduction
acum += __shfl_down_sync(HALF_MASK, acum, 2); // Reduction
acum += __shfl_down_sync(QUARTER_MASK, acum, 1); // Reduction
if (interiorId == 0) { // Final step and write results
C[row] = acum / (normA * c_norms[row]);
pos[row] = row;
}
}
}
__global__ void FirstMerge
(const int64_t N, embed_t *sims, unsigned int* pos, const int64_t length, const int64_t pad) {
const int64_t id = blockIdx.x * blockDim.x + threadIdx.x;
const int64_t start=id*N;
const int64_t end=start+N;
if (start<length) {
// Insertion sort, as N SHOULD be small
for(int64_t i=start+1; i<end; i++)
{
if (i<length){
/*if (i >= pad || i < 0) {
printf("ERRORR1 %i\n", i);
}*/
const embed_t temp=sims[i];
const int64_t position=pos[i];
int64_t j=i-1;
while((j>=start) && (temp>sims[j]) )
{
sims[j+1]=sims[j];
pos[j+1]=pos[j];
j=j-1;
/*if (j >= pad || j < -1) {
printf("ERRORR3 %i\n", j);
}*/
}
sims[(j+1)]=temp;
pos[(j+1)]=position;
}
else if (i<pad) {
for (int64_t i=0;i<N;++i) {
/*if (id+i >= pad || id+i < -1) {
printf("ERRORR4 %i\n", i);
}*/
sims[id+i]=0;
pos[id+i]=0;
}
}
}
}
}
__global__ void BotchedMergeSort
(const int N, embed_t *sims, unsigned int* pos, const unsigned long stride) {
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
id=id*N;
unsigned int posA=0,posB=0;
if (id<stride) {
unsigned int buffPos[maxN];
embed_t buffSims[maxN];
embed_t elemA=sims[(id+stride)];
unsigned int posAuxA=pos[(id+stride)];
embed_t elemB=sims[id];
unsigned int posAuxB=pos[id];
for(unsigned int i=0;i<N;++i) {
if (posAuxA==posAuxB) {
++posA;
elemA=sims[(id+posA+stride)];
posAuxA=pos[(id+posA+stride)];
}
if (elemA>elemB && posA<N) {
++posA;
buffSims[i]=elemA;
buffPos[i]=posAuxA;
elemA=sims[(id+posA+stride)];
posAuxA=pos[(id+posA+stride)];
}
else {
++posB;
buffSims[i]=elemB;
buffPos[i]=posAuxB;
elemB=sims[id+posB];
posAuxB=pos[id+posB];
}
}
memcpy(sims + id, buffSims, N * sizeof(embed_t));
memcpy(pos + id, buffPos, N * sizeof(unsigned int));
}
}
embed_t *A_d;
embed_t *C_d;
unsigned int *positions, *pos_d;
// FUNCTIONS DEFINED IN CUDAHELP.CU
extern "C"
void reservePinnedMemory(embed_t* &ptr, size_t bytes);
extern "C"
void freePinnedMemory(void* ptr);
// Load memory into cuda constants. This memory will be freed automatically at the end of the cuda context
extern "C"
void loadModel(embed_t * norms, embedV_t * model, uint32_t numRows, uint32_t N)
{
assert(N <= maxN);
fprintf(stdout, "Reserving memory for %i rows, and N %i\n", numRows, N);
const size_t numBytesModel = sizeof(embedV_t) * numRows;
const size_t numBytesNorms = sizeof(embed_t) * numRows;
unsigned int numRowsMod=numRows;
if (numRows%N!=0) numRowsMod=(N-numRows%N)+numRows;
numRowsMod+=numRowsMod%2*N;
const unsigned int numBytesQuery = sizeof(embedV_t);
const unsigned int numBytesSims = sizeof(unsigned int) * numRowsMod;
embedV_t* modelSym;
embed_t* normsSym;
gpuErrchk(cudaMalloc((embed_t**)&modelSym, numBytesModel));
gpuErrchk(cudaMalloc((embed_t**)&normsSym, numBytesNorms));
gpuErrchk(cudaMemcpy(modelSym, model, numBytesModel, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(normsSym, norms, numBytesNorms, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(c_model, (void**)&modelSym, sizeof(modelSym)));
gpuErrchk(cudaMemcpyToSymbol(c_norms, (void**)&normsSym, sizeof(normsSym)));
gpuErrchk(cudaMalloc((embed_t**)&A_d, numBytesQuery));
gpuErrchk(cudaMalloc((embed_t**)&C_d, numBytesSims));
gpuErrchk(cudaMalloc((unsigned int**)&pos_d, numBytesSims));
{
embed_t* tmp;
static_assert(sizeof(embed_t) == sizeof(unsigned int), "the embed type needs to be of 4 bytes");
reservePinnedMemory(tmp, sizeof(embed_t) * numRowsMod);
positions = reinterpret_cast<unsigned int*>(tmp);
}
gpuErrchk(cudaDeviceSynchronize());// Comment this on release
}
extern "C"
void freeModel()
{
gpuErrchk(cudaFree(A_d));
gpuErrchk(cudaFree(C_d));
gpuErrchk(cudaFree(pos_d));
freePinnedMemory(positions);
embed_t* sym;
gpuErrchk(cudaMemcpyFromSymbol(&sym, c_model, sizeof(embed_t*)));
gpuErrchk(cudaFree(sym));
gpuErrchk(cudaMemcpyFromSymbol(&sym, c_norms, sizeof(embed_t*)));
gpuErrchk(cudaFree(sym));
}
// MAIN FUNCTION TO RUN
extern "C"
void runCuda(uint32_t numRows, embedV_t queryTerm, embed_t normA, uint32_t N, int &returnCode, std::vector<unsigned int> &res)
{
assert(N <= maxN);
unsigned int nBlocks=(numRows/128)+1;
unsigned int nBlocksOriginal=nBlocks;
float elapsedTime;
unsigned int numRowsMod=numRows;
if (numRows%N!=0) numRowsMod=(N-numRows%N)+numRows;
numRowsMod+=numRowsMod%2*N;
cudaEvent_t start, stop;
const unsigned int numBytesQuery = sizeof(embedV_t);
// const unsigned int numBytesSims = sizeof(unsigned int) * numRowsMod;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaMemcpyAsync(A_d, queryTerm.data, numBytesQuery, cudaMemcpyHostToDevice));
gpuErrchk(cudaEventRecord(start, 0));
DotProduct<<<nBlocks, N_THREADS >>>(numRows*8, A_d, C_d, pos_d,normA);
gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaDeviceSynchronize());// Comment this on release
FirstMerge<<<nBlocks, N_THREADS >>>(N,C_d,pos_d,numRows,numRowsMod);
gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaDeviceSynchronize());// Comment this on release
unsigned long toReduce=((numRowsMod/N)/2);
while(toReduce>0) {
nBlocks=((toReduce*N)/ N_THREADS)+1;
BotchedMergeSort <<<nBlocks, N_THREADS >>> (N, C_d, pos_d, toReduce * N);
gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaDeviceSynchronize()); // Comment this on release
if (toReduce>1){
toReduce+=toReduce%2;
}
toReduce=toReduce/2;
}
// Because we don't use the similarities rigt now...
// gpuErrchk(cudaMemcpyAsync(similarities, C_d, sizeof(embed_t)*N, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpyAsync(positions, pos_d, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost));
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
returnCode=1;
}
gpuErrchk(cudaEventRecord(stop, 0));
gpuErrchk(cudaEventSynchronize(stop));
gpuErrchk(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("\nSimilarities\n");
printf("Vector Size: %d\n", numRows);
printf("nThreads: %d\n", N_THREADS);
printf("nBlocks: %d\n", nBlocksOriginal+1);
printf("Total Time with Events %4.6f ms\n", elapsedTime);
printf("Bandwidth %4.3f GB/s\n", (numRows *numEmbeds* sizeof(float)) / (1000000 * elapsedTime));
res.resize(N);
for (unsigned int i=0;i<N;++i) {
res[i] = positions[i];
}
}
|
713f37656933571645e80775417d274952889d48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__stratifycounts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *strata = NULL;
hipMalloc(&strata, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
unsigned int *bi = NULL;
hipMalloc(&bi, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
__stratifycounts), dim3(gridBlock),dim3(threadBlock), 0, 0, strata,n,a,bi);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__stratifycounts), dim3(gridBlock),dim3(threadBlock), 0, 0, strata,n,a,bi);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__stratifycounts), dim3(gridBlock),dim3(threadBlock), 0, 0, strata,n,a,bi);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 713f37656933571645e80775417d274952889d48.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__stratifycounts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *strata = NULL;
cudaMalloc(&strata, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
unsigned int *bi = NULL;
cudaMalloc(&bi, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__stratifycounts<<<gridBlock,threadBlock>>>(strata,n,a,bi);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__stratifycounts<<<gridBlock,threadBlock>>>(strata,n,a,bi);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__stratifycounts<<<gridBlock,threadBlock>>>(strata,n,a,bi);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4f5ee6526664f0b6042bbab56dfc406b93dfe547.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef CUDA_ERR_HANDLING
#define CUDA_ERR_HANDLING
//#include <hip/hip_runtime.h>
#include <cstdio>  // for fprintf
#include <cstdlib> // for exit
// Inspired by
// http://stackoverflow.com/a/14038590
// Call this macro just after a kernel call to check for invalid launch arguments
#define cudaErrchkKernel() { cudaErrchkAPI( hipPeekAtLastError() ); }
// Call cudaErrchkKernelAndSync to also check for execution errors
#define cudaErrchkKernelAndSync() { cudaErrchkAPI( hipPeekAtLastError() ); cudaErrchkAPI( hipDeviceSynchronize() ); }
// Wrap any CUDA API calls with cudaErrchkAPI to check for any errors
#define cudaErrchkAPI(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#endif // CUDA_ERR_HANDLING
| 4f5ee6526664f0b6042bbab56dfc406b93dfe547.cu | #ifndef CUDA_ERR_HANDLING
#define CUDA_ERR_HANDLING
//#include <cuda.h>
#include <cstdio>  // for fprintf
#include <cstdlib> // for exit
// Inspired by
// http://stackoverflow.com/a/14038590
// Call this macro just after a kernel call to check for invalid launch arguments
#define cudaErrchkKernel() { cudaErrchkAPI( cudaPeekAtLastError() ); }
// Call cudaErrchkKernelAndSync to also check for execution errors
#define cudaErrchkKernelAndSync() { cudaErrchkAPI( cudaPeekAtLastError() ); cudaErrchkAPI( cudaDeviceSynchronize() ); }
// Wrap any CUDA API calls with cudaErrchkAPI to check for any errors
#define cudaErrchkAPI(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#endif // CUDA_ERR_HANDLING
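// Editorial usage sketch (not part of the original header); someKernel and d_buf are hypothetical:
//     cudaErrchkAPI(cudaMalloc(&d_buf, bytes));   // wrap any CUDA API call
//     someKernel<<<grid, block>>>(d_buf);         // launch as usual
//     cudaErrchkKernelAndSync();                  // catches bad launch configs and execution errors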
|
f1c00fae05e01a7e4d14c0129446b8067a69df59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <thrust/transform_reduce.h>
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
#include "functional/tensor.h"
#include "tensors/gpu/backend.h"
#include "tensors/gpu/cuda_helpers.h"
#include "3rd_party/reduce_all.h"
namespace marian {
namespace gpu {
struct isnan_test {
__host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
__device__ inline float stableLogit(float x) {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
} else {
float z = expf(x);
return z / (1.0 + z);
}
}
bool IsNan(Tensor in) {
// hipSetDevice(in->getDevice().no);
// thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
// thrust::device_ptr<float> end
// = thrust::device_pointer_cast(in->data() + in->size());
// return thrust::transform_reduce(
// begin, end, isnan_test(), 0, thrust::plus<bool>());
return false;
}
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
hipSetDevice(out->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= out->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto in : inputs) {
size_t size = in->shape().elements() / step;
size_t offset2 = i * size;
hipMemcpy(out->data() + offset1,
in->data() + offset2,
size * sizeof(float),
hipMemcpyDeviceToDevice);
offset1 += size;
}
}
hipStreamSynchronize(0);
}
template <bool add>
__global__ void gInsertCols(float* out,
const float* in,
size_t rows,
size_t cols,
size_t cols_out,
size_t cols_in,
size_t offset_out,
size_t offset_in) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols_out + offset_out;
const float* rowIn = in + j * cols_in + offset_in;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
if(add)
rowOut[i] += rowIn[i];
else
rowOut[i] = rowIn[i];
}
}
}
}
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
size_t offset = 0;
int cols_out = out->shape().back();
for(auto in : inputs) {
ABORT_IF(rows != in->shape().elements() / in->shape().back(),
"First dimension must be equal");
int cols_in = in->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols_in);
hipLaunchKernelGGL(( gInsertCols<false>), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0);
offset += cols_in;
}
hipStreamSynchronize(0);
}
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols,
const float* in1, size_t inStride1,
const float* in2, size_t inStride2) {
int outStride = inStride1 + inStride2;
int rows = rowBatch * outStride;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int curBatch = j / outStride;
int curPos = j % outStride;
int jIn1 = (curBatch * inStride1) + curPos;
int jIn2 = (curBatch * inStride2) + curPos - inStride1;
const float* rowIn1 = in1 + jIn1 * cols;
const float* rowIn2 = in2 + jIn2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(curPos < inStride1)
rowOut[i] = rowIn1[i];
else
rowOut[i] = rowIn2[i];
}
}
}
}
}
void Concatenate2(Tensor out, Tensor in1, Tensor in2) {
hipSetDevice(out->getDevice().no);
size_t rows = out->shape().elements() / out->shape().back();
size_t cols = out->shape().back();
size_t rowStride1 = in1->shape()[-2];
size_t rowStride2 = in2->shape()[-2];
size_t rowBatch = rows / out->shape()[-2];
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
hipLaunchKernelGGL(( gJoin2), dim3(blocks), dim3(threads), 0, 0, out->data(),
rowBatch,
cols,
in1->data(),
rowStride1,
in2->data(),
rowStride2);
hipStreamSynchronize(0);
}
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
if(ax == out->shape().size() - 1)
Concatenate1(out, inputs);
else if(ax == out->shape().size() - 2 && inputs.size() == 2)
Concatenate2(out, inputs[0], inputs[1]);
else
ConcatCont(out, inputs, ax);
}
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
hipSetDevice(in->getDevice().no);
size_t offset = 0;
int rows = in->shape().elements() / in->shape().back();
int cols_in = in->shape().back();
for(auto out : outputs) {
ABORT_IF(rows != out->shape().elements() / out->shape().back(),
"First dimension must be equal");
int cols_out = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols_out);
hipLaunchKernelGGL(( gInsertCols<true>), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset);
offset += cols_out;
}
hipStreamSynchronize(0);
}
// @TODO: this function is just a temporary fix until I come up with
// something better for the situation below.
__global__ void gAddRow(float* out, const float* in, int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[index] = in[index] + out[index];
}
}
}
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
hipSetDevice(in->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= in->shape()[i];
int offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto out : outputs) {
int size = out->shape().elements() / step;
int offset2 = i * size;
      // BUG: this does not add gradients
//hipMemcpyAsync(out->data() + offset2,
// in->data() + offset1,
// size * sizeof(float),
// hipMemcpyDeviceToDevice);
// @TODO: this is a quick but bad fix for the above bug
int threads = ::min(MAX_THREADS, size);
int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0));
hipLaunchKernelGGL(( gAddRow), dim3(blocks), dim3(threads), 0, 0, out->data() + offset2,
in->data() + offset1,
size);
offset1 += size;
}
}
hipStreamSynchronize(0);
}
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
if(ax == in->shape().size() - 1)
Split1(outputs, in);
else
SplitCont(outputs, in, ax);
}
template <bool add>
__global__ void gTransposeND(
functional::Tensor<float> out,
const functional::Tensor<float> in,
const functional::Array<int, functional::Shape::size()> permute) {
constexpr size_t N = functional::Shape::size();
functional::Array<int, N> oDims;
functional::Array<int, N> pDims;
int length = out.shape().elements();
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out.shape().dims(index, oDims);
for(int i = 0; i < N; ++i)
pDims[permute[i]] = oDims[i];
if(add)
out[index] += in[pDims];
else
out[index] = in[pDims];
}
}
}
template <bool add>
__global__ void gTranspose0213(float* out, const float* in,
int rows,
int cols,
int stride1,
int stride2) {
int stride = stride1 * stride2;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int z = j / stride;
int y = (j % stride) / stride1;
int x = (j % stride) % stride1;
int j2 = z * stride + x * stride2 + y;
const float* rowIn = in + j2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(add)
rowOut[i] += rowIn[i];
else
rowOut[i] = rowIn[i];
}
}
}
}
}
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
hipSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
hipLaunchKernelGGL(( gTranspose0213<false>), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(),
rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gTransposeND<false>), dim3(blocks), dim3(threads), 0, 0, out, in, axes);
}
}
void TransposeNDGrad(Tensor out, Tensor in, const std::vector<int>& vAxis) {
hipSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
hipLaunchKernelGGL(( gTranspose0213<true>), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(),
rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gTransposeND<true>), dim3(blocks), dim3(threads), 0, 0, out, in, axes);
}
}
__global__ void gSoftmax(float* out,
functional::Shape outShape,
const float* in,
const float* mask,
const functional::Shape maskShape) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
bool broadcast = outShape != maskShape;
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = -CUDA_FLT_MAX; // mask
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
if(mVal && sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
float ex = 0;
if(mVal)
ex = __expf(sp[id] - max);
so[id] = ex;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
so[id] = so[id] / _sum[0];
}
}
}
}
}
void Softmax(Tensor out, Tensor in, Tensor mask) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
if(mask)
hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), mask->data(), mask->shape());
else
hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), 0, out->shape());
}
__global__ void gLogSoftmax(float* out,
const functional::Shape outShape,
const float* in) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sm = sp[id] - max;
float ex = __expf(sm);
so[id] = sm;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
so[id] -= __logf(_sum[0]);
}
}
}
}
void LogSoftmax(Tensor out, Tensor in) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gLogSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
__global__ void gSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += valRow[id] * adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float val = valRow[id] * (adjRow[id] - _sum[0]);
if(val)
gradRow[id] += val;
}
}
}
}
}
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
hipSetDevice(adj->getDevice().no);
// grad and val are both m-by-k matrices, passed as input.
// A weighted average of each row of grad (according to the weights
// specified in val) is computed and subtracted from Out.
// adj is multiplied for each element to get backward step in autodiff
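  // Editorial note (not in the original source): per element i of a row with softmax
  // output y (stored in val), the kernel above computes
  //   grad_i += y_i * (adj_i - sum_j y_j * adj_j),
  // where _sum[0] in gSoftmaxGrad accumulates the dot product sum_j y_j * adj_j.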
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = ::min(MAX_BLOCKS, m);
int threads = ::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
grad->data(), adj->data(), val->data(), m, k);
}
__global__ void gLogSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
hipSetDevice(adj->getDevice().no);
// grad and val are both m-by-k matrices, passed as input.
// A weighted average of each row of grad (according to the weights
// specified in val) is computed and subtracted from Out.
// adj is multiplied for each element to get backward step in autodiff
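  // Editorial note (not in the original source): for a row with log-softmax output l
  // (stored in val), the kernel above computes, per element i,
  //   grad_i += adj_i - exp(l_i) * sum_j adj_j,
  // where _sum[0] in gLogSoftmaxGrad accumulates sum_j adj_j.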
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = ::min(MAX_BLOCKS, m);
int threads = ::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gLogSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
grad->data(), adj->data(), val->data(), m, k);
}
///////////////////////////////////////////////////////
__global__ void gArgmax(float* out,
const float* data,
size_t rows,
size_t cols) {
size_t row = blockIdx.x;
size_t startInd = row * cols;
float maxScore = -99999;
size_t maxInd;
for(size_t col = 0; col < cols; ++col) {
size_t ind = startInd + col;
float score = data[ind];
if(score > maxScore) {
maxScore = score;
maxInd = col;
}
}
out[row] = maxInd;
}
///////////////////////////////////////////////////////
__global__ void gCopyRows(float* out,
const float* in,
size_t cols,
const size_t* sourceRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = j;
size_t srcId = sourceRowIdx[j];
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)cols);
int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
__global__ void gPasteRows(float* out,
const float* in,
size_t cols,
const size_t* targetRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
}
void PasteRows(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)cols);
int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy);
// @TODO: turn into tensor
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gPasteRows), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
/////////////
__global__ void gCopyCols(float* out,
const float* in,
size_t rows,
size_t colsIn,
const size_t* sourceColIdx,
size_t colsOut) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsOut; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsOut)
rowOut[i] = rowIn[sourceColIdx[i]];
}
}
}
}
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)colsToCopy);
int blocks = ::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gCopyCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
__global__ void gPasteCols(float* out,
const float* in,
size_t rows,
size_t colsOut,
const size_t* targetColIdx,
size_t colsIn) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsIn; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsIn)
rowOut[targetColIdx[i]] += rowIn[i];
}
}
}
}
void PasteCols(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)colsToCopy);
int blocks = ::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gPasteCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
__global__ void gSelect(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = outShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
outShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int inIndex = inShape.index(dims);
out[index] = in[inIndex];
}
}
}
__global__ void gInsert(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = inShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
inShape.dims(index, dims);
      dims[axis] = d_indices[dims[axis]];
int outIndex = outShape.index(dims);
out[outIndex] += in[index];
}
}
}
void Select(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
hipLaunchKernelGGL(( gSelect), dim3(blocks), dim3(threads), 0, 0, out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
void Insert(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
hipSetDevice(in->getDevice().no);
int length = in->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
hipLaunchKernelGGL(( gInsert), dim3(blocks), dim3(threads), 0, 0, out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
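// Usage sketch (illustrative only; tensor names are placeholders): Select
// gathers along one axis and Insert accumulates the corresponding gradient.
//   Select(out, in, axis, indices, allocator);         // out[..., i, ...] = in[..., indices[i], ...]
//   Insert(inGrad, outGrad, axis, indices, allocator); // inGrad[..., indices[i], ...] += outGrad[..., i, ...]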
__global__ void gGRUFastForward(float* out,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowState = state + j * cols;
const float* xWrow = xW + j * cols * 3;
const float* sUrow = sU + j * cols * 3;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float h;
if(final)
h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
else
h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
float out = (1.0f - z) * h + z * rowState[i];
rowOut[i] = m * out + (1 - m) * rowState[i];
}
}
}
}
}
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gGRUFastForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), // output
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols,
final);
}
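// Restating gGRUFastForward per element i of a row (gates packed as
// [r | z | candidate] inside xW, sU and b):
//   r   = logit(xW_r + sU_r + b_r)
//   z   = logit(xW_z + sU_z + b_z)
//   h   = final ? tanh(xW_h + (sU_h + b_h) * r) : tanh(xW_h + sU_h * r + b_h)
//   out = m * ((1 - z) * h + z * state) + (1 - m) * state,   m = mask (or 1)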
__global__ void gGRUFastBackward(float* outState,
float* outXW,
float* outSU,
float* outB,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + cols;
int l = i + 2 * cols;
float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
float h;
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
float adj = rowAdj[i];
float t = (1 - z) * (1 - h * h);
// df/ds
if(outState)
rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final)
dfdxW_r *= rowSU[l] + b[l];
else
dfdxW_r *= rowSU[l];
if(outXW)
rowOutXW[i] += dfdxW_r;
if(outSU)
rowOutSU[i] += dfdxW_r;
if(outB)
atomicAdd(outB + i, dfdxW_r);
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW)
rowOutXW[k] += dfdxW_z;
if(outSU)
rowOutSU[k] += dfdxW_z;
if(outB)
atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW)
rowOutXW[l] += dfdxW_x;
if(outSU)
rowOutSU[l] += dfdxW_x * r;
if(outB)
if(final)
atomicAdd(outB + l, dfdxW_x * r);
else
atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
void GRUFastBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj,
bool final) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gGRUFastBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols,
final);
}
__global__ void gCrossEntropyPick(float* out,
const functional::Shape outShape,
const float* in,
const functional::Shape inShape,
const float* pick) {
int rows = inShape.elements() / inShape.back();
int cols = inShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += __expf(sp[id] - max);
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id == (int)pick[j]) {
out[j] = __logf(_sum[0]) - sp[id] + max;
}
}
}
}
}
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
hipSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gCrossEntropyPick), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
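// Restating gCrossEntropyPick: with row scores x and picked index p,
//   out = log(sum_i exp(x_i - max)) - (x_p - max) = -log softmax(x)_p,
// i.e. the negative log-likelihood of the picked class, computed with the
// usual max-subtraction for numerical stability.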
__global__ void gCrossEntropyPickBackward(float* out,
const functional::Shape outShape,
const float* adj,
const float* in,
const float* pick) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
float* so = out + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = __expf(sp[id] - max);
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sub = (float)(id == (int)pick[j]);
so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
}
}
}
}
}
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gCrossEntropyPickBackward), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
float L2Norm(Tensor in) {
hipSetDevice(in->getDevice().no);
int size = in->shape().elements();
int threads = ::min(MAX_THREADS, size);
int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0));
uint8_t* data;
hipMalloc(&data, blocks * sizeof(float));
Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)),
{1, blocks},
in->getBackend()));
using namespace functional;
ReduceAll(_1 * _1, out, in);
float dataCpu = sqrtf(out->get(0));
out.reset();
hipFree(data);
return dataCpu;
}
__global__ void gAtt(float* out,
const float* va,
const float* ctx,
const float* state,
int m, // total rows (batch x time x beam)
int k, // depth
int b, // batch size
int t // time of ctx
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* vaRow = va;
const float* ctxRow = ctx + (j % (b * t)) * cols;
const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = ctxRow[id] + stateRow[id];
float ex = tanhf(z) * vaRow[id];
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
out[j] = _sum[0];
__syncthreads();
}
}
}
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = context->shape()[-1];
size_t b = context->shape()[-2];
size_t t = context->shape()[-3];
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gAtt), dim3(blocks), dim3(threads), shared, 0,
out->data(), va->data(), context->data(), state->data(), m, k, b, t);
}
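// Restating gAtt: for each output position j,
//   out[j] = sum_i va[i] * tanh(ctx[j][i] + state[j'][i]),
// an additive attention energy; j' depends only on the batch and beam
// position, so the state row is broadcast over the time steps of the context.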
__global__ void gAttBack(float* gVa,
float* gContext,
float* gState,
const float* va,
const float* context,
const float* state,
const float* adj,
int m, // rows
int k, // cols
int n // batch size
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < m; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* gcRow = gContext + j * cols;
float* gsRow = gState + (j % n) * cols;
const float* cRow = context + j * cols;
const float* sRow = state + (j % n) * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = cRow[id] + sRow[id];
float t = tanhf(z);
float r = va[id] * (1.f - t * t);
gcRow[id] += r * adj[j];
gsRow[id] += r * adj[j];
atomicAdd(gVa + id, t * adj[j]);
}
}
}
}
}
void AttBack(Tensor gVa,
Tensor gContext,
Tensor gState,
Tensor va,
Tensor context,
Tensor state,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
size_t m = adj->shape().elements() / adj->shape()[-1];
size_t k = context->shape()[-1];
size_t n = context->shape()[-2];
int blocks = ::min(MAX_BLOCKS, (int)n);
int threads = ::min(MAX_THREADS, (int)k);
hipLaunchKernelGGL(( gAttBack), dim3(blocks), dim3(threads), 0, 0, gVa->data(),
gContext->data(),
gState->data(),
va->data(),
context->data(),
state->data(),
adj->data(),
m,
k,
n);
}
__global__ void gLNormalization(float* out,
const float* in,
const float* alpha,
const float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float _share[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = sp[id] - mean;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float t = alpha[id] * ((sp[id] - mean) / sigma);
if(beta != nullptr)
t += beta[id];
so[id] = t;
}
}
}
}
}
void LayerNormalization(Tensor out,
Tensor in,
Tensor gamma,
Tensor beta,
float eps) {
hipSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = 2 * threads * sizeof(float);
hipLaunchKernelGGL(( gLNormalization), dim3(blocks), dim3(threads), shared, 0, out->data(),
in->data(),
gamma->data(),
beta ? beta->data() : nullptr,
rows,
cols,
eps);
}
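// Restating gLNormalization per row:
//   mean  = (1/cols) * sum_i x_i
//   sigma = sqrt(eps + (1/cols) * sum_i (x_i - mean)^2)
//   y_i   = alpha_i * (x_i - mean) / sigma + beta_i   (beta term only if given)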
__global__ void gLayerNormalizationGrad(float* gradX,
float* gradGamma,
float* gradBeta,
float* adj,
float* y,
float* x,
float* gamma,
float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float shared[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* sum_adj = shared;
float* sum_adj_x = shared + blockDim.x;
float* sum_x = shared + 2 * blockDim.x;
float* sum_sqr = shared + 3 * blockDim.x;
const float* xRow = x + j * cols;
const float* yRow = y + j * cols;
const float* adjRow = adj + j * cols;
float* gradXRow = gradX + j * cols;
sum_x[threadIdx.x] = 0.0f;
sum_adj[threadIdx.x] = 0.0f;
sum_adj_x[threadIdx.x] = 0.0f;
sum_sqr[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
sum_x[threadIdx.x] += xRow[id];
sum_adj_x[threadIdx.x]
+= adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
sum_adj[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = sum_x[0] / cols;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = xRow[id] - mean;
sum_sqr[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (sum_sqr[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float grad_x = 0.0f;
float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
grad_x += cols * adjRow[id];
grad_x -= sum_adj[0];
grad_x -= sum_adj_x[0] * x_hat;
grad_x /= (cols * sigma);
float valX = gamma[id] * grad_x;
float sign = (0.f < valX) - (valX < 0.f);
valX = fabs(valX) > 1000 ? sign * 1000 : valX;
gradXRow[id] += valX;
atomicAdd(gradGamma + id, adjRow[id] * x_hat);
if(beta) {
atomicAdd(gradBeta + id, adjRow[id]);
}
}
}
}
}
}
void LayerNormalizationGrad(Tensor gradX,
Tensor gradGamma,
Tensor gradBeta,
Tensor adj,
Tensor y,
Tensor x,
Tensor gamma,
Tensor beta,
float eps) {
hipSetDevice(adj->getDevice().no);
int rows = y->shape().elements() / y->shape()[-1];
int cols = y->shape()[-1];
int threads = ::min(MAX_THREADS, cols);
int blocks = ::min(MAX_BLOCKS, rows);
int shared = sizeof(float) * threads * 4;
hipLaunchKernelGGL(( gLayerNormalizationGrad), dim3(blocks), dim3(threads), shared, 0,
gradX->data(),
gradGamma->data(),
(gradBeta) ? gradBeta->data() : nullptr,
adj->data(),
y->data(),
x->data(),
gamma->data(),
(beta) ? beta->data() : nullptr,
rows,
cols,
eps);
}
template <bool add>
__global__ void gShift(float* out, const float* in, int length, int offset) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
if(add) {
if(index - offset >= 0 && index - offset < length)
out[index] += in[index - offset];
}
else {
if(index - offset < 0 || index - offset >= length)
out[index] = 0;
else
out[index] = in[index - offset];
}
}
}
}
void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
if(invert)
offset = -offset;
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gShift<false>), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset);
}
void ShiftGrad(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
if(invert)
offset = -offset;
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gShift<true>), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset);
}
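// Offset example (illustrative; the shape and shift values are hypothetical):
// for a tensor of shape {4, 8, 512} and shift {1, 0, 0}, the flat offset is
// stride(0) * 1 = 8 * 512, so Shift moves the data by one slice along the
// first axis, zero-filling positions shifted in from outside (gShift<false>),
// while ShiftGrad accumulates only in-range positions (gShift<true>).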
__global__ void gSetSparse(float* out,
const size_t* indices,
const float* values,
int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[indices[index]] = values[index];
}
}
}
void SetSparse(float* out,
const std::vector<size_t>& indices,
const std::vector<float>& values) {
int length = indices.size();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, length * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
length * sizeof(size_t),
hipMemcpyHostToDevice));
float* d_values;
CUDA_CHECK(hipMalloc(&d_values, length * sizeof(float)));
CUDA_CHECK(hipMemcpy(
d_values, values.data(), length * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gSetSparse), dim3(blocks), dim3(threads), 0, 0, out, d_indices, d_values, length);
hipFree(d_indices);
hipFree(d_values);
}
/******************************************************************************/
__global__ void gLSTMCellForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float cout = gf * rowCell[i] + gi * gc;
rowOut[i] = m * cout + (1 - m) * rowCell[i];
}
}
}
}
}
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMCellForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols);
}
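// Restating gLSTMCellForward per element i (pre-activations packed as
// [f | i | c | o]; the o gate is consumed by LSTMOutputForward below):
//   gf   = logit(xW_f + sU_f + b_f)
//   gi   = logit(xW_i + sU_i + b_i)
//   gc   = tanh (xW_c + sU_c + b_c)
//   cell = m * (gf * cell_prev + gi * gc) + (1 - m) * cell_prev,   m = mask (or 1)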
__global__ void gLSTMOutputForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
rowOut[i] = go * tanhf(rowCell[i]);
}
}
}
}
}
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMOutputForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
rows,
cols);
}
__global__ void gLSTMCellBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += (m * gf - m + 1) * adj;
// dc/d(b_f) = dc/d(xW_f) ...
float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
if(outXW)
rowOutXW[i] += dcdxf;
if(outSU)
rowOutSU[i] += dcdxf;
if(outB)
atomicAdd(outB + i, dcdxf);
// dc/d(b_i) ...
float dcdb_i = m * gc * gi * (1 - gi) * adj;
if(outXW)
rowOutXW[k] += dcdb_i;
if(outSU)
rowOutSU[k] += dcdb_i;
if(outB)
atomicAdd(outB + k, dcdb_i);
// dc/d(b_c) ...
float dcdxc = m * gi * (1 - gc * gc) * adj;
if(outXW)
rowOutXW[l] += dcdxc;
if(outSU)
rowOutSU[l] += dcdxc;
if(outB)
atomicAdd(outB + l, dcdxc);
}
}
}
}
}
void LSTMCellBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMCellBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols);
}
__global__ void gLSTMOutputBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
float t = tanhf(rowCell[i]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += go * (1 - t * t) * adj;
          // dc/d(b_o) = dc/d(xW_o) ...
float dcdxo = t * go * (1 - go) * adj;
if(outXW)
rowOutXW[k] += dcdxo;
if(outSU)
rowOutSU[k] += dcdxo;
if(outB)
atomicAdd(outB + k, dcdxo);
}
}
}
}
}
void LSTMOutputBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
adj->data(),
rows,
cols);
}
__global__ void gHighwayForward(float* out,
const float* in1,
const float* in2,
const float* t,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
}
}
}
void HighwayForward(Tensor out,
const Tensor in1,
const Tensor in2,
const Tensor t) {
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gHighwayForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), in1->data(), in2->data(), t->data(), length);
}
__global__ void gHighwayBackward(float* out1,
float* out2,
float* outt,
const float* in1,
const float* in2,
const float* t,
const float* adj,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out1[index] = sigma * adj[index];
out2[index] = (1.f - sigma) * adj[index];
outt[index]
= sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
}
}
}
void HighwayBackward(Tensor out1,
Tensor out2,
Tensor outt,
const Tensor in1,
const Tensor in2,
const Tensor t,
const Tensor adj) {
hipSetDevice(out1->getDevice().no);
int length = out1->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gHighwayBackward), dim3(blocks), dim3(threads), 0, 0, out1->data(),
out2->data(),
outt->data(),
in1->data(),
in2->data(),
t->data(),
adj->data(),
length);
}
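// Restating the highway pair above with gate s = logit(t):
//   forward:  out  = s * in1 + (1 - s) * in2
//   backward: out1 = s * adj
//             out2 = (1 - s) * adj
//             outt = s * (1 - s) * (in1 - in2) * adj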
__global__ void gMaxPoolingForward(float* out,
int outRows,
int outCols,
float* in,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
}
void PoolingWithMaskingForward(Tensor out,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = out->shape().elements();
int threads = ::min(n, MAX_THREADS);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& outShape = out->shape();
int outRows = outShape[2];
int outCols = outShape[0] * outShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
hipLaunchKernelGGL(( gMaxPoolingForward), dim3(blocks), dim3(threads), 0, 0, out->data(),
outRows,
outCols,
in->data(),
inRows,
inCols,
mask->data(),
outShape[1],
mask->shape()[2],
width,
lastWidth);
}
__global__ void gMaxPoolingBackward(float* adj,
int adjRows,
int adjCols,
float* in,
float* adjIn,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= adjRows * adjCols)
return;
int rowId = tid / adjRows;
int colId = tid % adjRows;
float* b = in + (rowId * inCols) + (colId * width);
if(colId == adjRows - 1) {
width = lastWidth;
}
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
size_t currentMaxIdx = 0;
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
currentMaxIdx = i;
}
}
adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
+= adj[rowId + (colId * adjCols)];
}
void PoolingWithMaskingBackward(Tensor adj,
Tensor adjIn,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = adj->shape().elements();
int threads = ::min(n, 512);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& adjShape = adj->shape();
int adjRows = adjShape[2];
int adjCols = adjShape[0] * adjShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
hipLaunchKernelGGL(( gMaxPoolingBackward), dim3(blocks), dim3(threads), 0, 0, adj->data(),
adjRows,
adjCols,
in->data(),
adjIn->data(),
inRows,
inCols,
mask->data(),
adjShape[1],
mask->shape()[2],
width,
lastWidth);
}
}
} // namespace marian
| f1c00fae05e01a7e4d14c0129446b8067a69df59.cu | //#include <thrust/transform_reduce.h>
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
#include "functional/tensor.h"
#include "tensors/gpu/backend.h"
#include "tensors/gpu/cuda_helpers.h"
#include "3rd_party/reduce_all.h"
namespace marian {
namespace gpu {
struct isnan_test {
__host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
__device__ inline float stableLogit(float x) {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
} else {
float z = expf(x);
return z / (1.0 + z);
}
}
bool IsNan(Tensor in) {
// cudaSetDevice(in->getDevice().no);
// thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
// thrust::device_ptr<float> end
// = thrust::device_pointer_cast(in->data() + in->size());
// return thrust::transform_reduce(
// begin, end, isnan_test(), 0, thrust::plus<bool>());
return false;
}
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
cudaSetDevice(out->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= out->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto in : inputs) {
size_t size = in->shape().elements() / step;
size_t offset2 = i * size;
cudaMemcpy(out->data() + offset1,
in->data() + offset2,
size * sizeof(float),
cudaMemcpyDeviceToDevice);
offset1 += size;
}
}
cudaStreamSynchronize(0);
}
template <bool add>
__global__ void gInsertCols(float* out,
const float* in,
size_t rows,
size_t cols,
size_t cols_out,
size_t cols_in,
size_t offset_out,
size_t offset_in) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols_out + offset_out;
const float* rowIn = in + j * cols_in + offset_in;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
if(add)
rowOut[i] += rowIn[i];
else
rowOut[i] = rowIn[i];
}
}
}
}
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
size_t offset = 0;
int cols_out = out->shape().back();
for(auto in : inputs) {
ABORT_IF(rows != in->shape().elements() / in->shape().back(),
"First dimension must be equal");
int cols_in = in->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols_in);
gInsertCols<false><<<blocks, threads>>>(
out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0);
offset += cols_in;
}
cudaStreamSynchronize(0);
}
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols,
const float* in1, size_t inStride1,
const float* in2, size_t inStride2) {
int outStride = inStride1 + inStride2;
int rows = rowBatch * outStride;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int curBatch = j / outStride;
int curPos = j % outStride;
int jIn1 = (curBatch * inStride1) + curPos;
int jIn2 = (curBatch * inStride2) + curPos - inStride1;
const float* rowIn1 = in1 + jIn1 * cols;
const float* rowIn2 = in2 + jIn2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(curPos < inStride1)
rowOut[i] = rowIn1[i];
else
rowOut[i] = rowIn2[i];
}
}
}
}
}
void Concatenate2(Tensor out, Tensor in1, Tensor in2) {
cudaSetDevice(out->getDevice().no);
size_t rows = out->shape().elements() / out->shape().back();
size_t cols = out->shape().back();
size_t rowStride1 = in1->shape()[-2];
size_t rowStride2 = in2->shape()[-2];
size_t rowBatch = rows / out->shape()[-2];
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
gJoin2<<<blocks, threads>>>(out->data(),
rowBatch,
cols,
in1->data(),
rowStride1,
in2->data(),
rowStride2);
cudaStreamSynchronize(0);
}
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
if(ax == out->shape().size() - 1)
Concatenate1(out, inputs);
else if(ax == out->shape().size() - 2 && inputs.size() == 2)
Concatenate2(out, inputs[0], inputs[1]);
else
ConcatCont(out, inputs, ax);
}
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
cudaSetDevice(in->getDevice().no);
size_t offset = 0;
int rows = in->shape().elements() / in->shape().back();
int cols_in = in->shape().back();
for(auto out : outputs) {
ABORT_IF(rows != out->shape().elements() / out->shape().back(),
"First dimension must be equal");
int cols_out = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols_out);
gInsertCols<true><<<blocks, threads>>>(
out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset);
offset += cols_out;
}
cudaStreamSynchronize(0);
}
// @TODO: this function is just a temporary fix until I come up with
// something better for the situation below.
__global__ void gAddRow(float* out, const float* in, int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[index] = in[index] + out[index];
}
}
}
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
cudaSetDevice(in->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= in->shape()[i];
int offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto out : outputs) {
int size = out->shape().elements() / step;
int offset2 = i * size;
      // BUG: this does not add gradients
//cudaMemcpyAsync(out->data() + offset2,
// in->data() + offset1,
// size * sizeof(float),
// cudaMemcpyDeviceToDevice);
// @TODO: this is a quick but bad fix for the above bug
int threads = std::min(MAX_THREADS, size);
int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0));
gAddRow<<<blocks, threads>>>(out->data() + offset2,
in->data() + offset1,
size);
offset1 += size;
}
}
cudaStreamSynchronize(0);
}
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
if(ax == in->shape().size() - 1)
Split1(outputs, in);
else
SplitCont(outputs, in, ax);
}
template <bool add>
__global__ void gTransposeND(
functional::Tensor<float> out,
const functional::Tensor<float> in,
const functional::Array<int, functional::Shape::size()> permute) {
constexpr size_t N = functional::Shape::size();
functional::Array<int, N> oDims;
functional::Array<int, N> pDims;
int length = out.shape().elements();
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out.shape().dims(index, oDims);
for(int i = 0; i < N; ++i)
pDims[permute[i]] = oDims[i];
if(add)
out[index] += in[pDims];
else
out[index] = in[pDims];
}
}
}
template <bool add>
__global__ void gTranspose0213(float* out, const float* in,
int rows,
int cols,
int stride1,
int stride2) {
int stride = stride1 * stride2;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int z = j / stride;
int y = (j % stride) / stride1;
int x = (j % stride) % stride1;
int j2 = z * stride + x * stride2 + y;
const float* rowIn = in + j2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(add)
rowOut[i] += rowIn[i];
else
rowOut[i] = rowIn[i];
}
}
}
}
}
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
cudaSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
gTranspose0213<false><<<blocks, threads>>>(out->data(), in->data(),
rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gTransposeND<false><<<blocks, threads>>>(out, in, axes);
}
}
void TransposeNDGrad(Tensor out, Tensor in, const std::vector<int>& vAxis) {
cudaSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
gTranspose0213<true><<<blocks, threads>>>(out->data(), in->data(),
rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gTransposeND<true><<<blocks, threads>>>(out, in, axes);
}
}
__global__ void gSoftmax(float* out,
functional::Shape outShape,
const float* in,
const float* mask,
const functional::Shape maskShape) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
bool broadcast = outShape != maskShape;
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = -CUDA_FLT_MAX; // mask
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
if(mVal && sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
float ex = 0;
if(mVal)
ex = __expf(sp[id] - max);
so[id] = ex;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
so[id] = so[id] / _sum[0];
}
}
}
}
}
void Softmax(Tensor out, Tensor in, Tensor mask) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
if(mask)
gSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), mask->data(), mask->shape());
else
gSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), 0, out->shape());
}
__global__ void gLogSoftmax(float* out,
const functional::Shape outShape,
const float* in) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sm = sp[id] - max;
float ex = __expf(sm);
so[id] = sm;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
so[id] -= __logf(_sum[0]);
}
}
}
}
void LogSoftmax(Tensor out, Tensor in) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
gLogSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
__global__ void gSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += valRow[id] * adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float val = valRow[id] * (adjRow[id] - _sum[0]);
if(val)
gradRow[id] += val;
}
}
}
}
}
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
cudaSetDevice(adj->getDevice().no);
  // grad, adj and val are all m-by-k matrices.
  // For each row, the inner product sum(val * adj) is subtracted from adj and
  // the result is scaled element-wise by val before being accumulated into
  // grad, i.e. grad += val * (adj - sum(val * adj)).
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = std::min(MAX_BLOCKS, m);
int threads = std::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
gSoftmaxGrad<<<blocks, threads, shared>>>(
grad->data(), adj->data(), val->data(), m, k);
}
__global__ void gLogSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
cudaSetDevice(adj->getDevice().no);
  // grad and adj are m-by-k matrices; val holds the log-softmax outputs.
  // For each row, grad += adj - exp(val) * sum(adj), where exp(val) recovers
  // the softmax probabilities.
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = std::min(MAX_BLOCKS, m);
int threads = std::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
gLogSoftmaxGrad<<<blocks, threads, shared>>>(
grad->data(), adj->data(), val->data(), m, k);
}
///////////////////////////////////////////////////////
__global__ void gArgmax(float* out,
const float* data,
size_t rows,
size_t cols) {
size_t row = blockIdx.x;
size_t startInd = row * cols;
float maxScore = -99999;
  size_t maxInd = 0;
for(size_t col = 0; col < cols; ++col) {
size_t ind = startInd + col;
float score = data[ind];
if(score > maxScore) {
maxScore = score;
maxInd = col;
}
}
out[row] = maxInd;
}
///////////////////////////////////////////////////////
__global__ void gCopyRows(float* out,
const float* in,
size_t cols,
const size_t* sourceRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = j;
size_t srcId = sourceRowIdx[j];
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)cols);
int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gCopyRows<<<blocks, threads>>>(
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gPasteRows(float* out,
const float* in,
size_t cols,
const size_t* targetRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
}
void PasteRows(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)cols);
int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy);
// @TODO: turn into tensor
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gPasteRows<<<blocks, threads>>>(
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
/////////////
__global__ void gCopyCols(float* out,
const float* in,
size_t rows,
size_t colsIn,
const size_t* sourceColIdx,
size_t colsOut) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsOut; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsOut)
rowOut[i] = rowIn[sourceColIdx[i]];
}
}
}
}
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)colsToCopy);
int blocks = std::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gCopyCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gPasteCols(float* out,
const float* in,
size_t rows,
size_t colsOut,
const size_t* targetColIdx,
size_t colsIn) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsIn; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsIn)
rowOut[targetColIdx[i]] += rowIn[i];
}
}
}
}
void PasteCols(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)colsToCopy);
int blocks = std::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gPasteCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gSelect(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = outShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
outShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int inIndex = inShape.index(dims);
out[index] = in[inIndex];
}
}
}
__global__ void gInsert(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = inShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
inShape.dims(index, dims);
      dims[axis] = d_indices[dims[axis]];
int outIndex = outShape.index(dims);
out[outIndex] += in[index];
}
}
}
void Select(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
gSelect<<<blocks, threads>>>(out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
void Insert(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
cudaSetDevice(in->getDevice().no);
int length = in->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
gInsert<<<blocks, threads>>>(out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
__global__ void gGRUFastForward(float* out,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowState = state + j * cols;
const float* xWrow = xW + j * cols * 3;
const float* sUrow = sU + j * cols * 3;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float h;
if(final)
h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
else
h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
float out = (1.0f - z) * h + z * rowState[i];
rowOut[i] = m * out + (1 - m) * rowState[i];
}
}
}
}
}
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gGRUFastForward<<<blocks, threads>>>(
out->data(), // output
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols,
final);
}
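// Backward pass of the fused GRU cell: the gates are recomputed from the saved
// inputs, and gradients w.r.t. the previous state, xW, sU and the bias are
// accumulated (the bias via atomicAdd, since it is shared across rows). Any of
// the out* pointers may be null to skip that gradient.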
__global__ void gGRUFastBackward(float* outState,
float* outXW,
float* outSU,
float* outB,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + cols;
int l = i + 2 * cols;
float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
float h;
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
float adj = rowAdj[i];
float t = (1 - z) * (1 - h * h);
// df/ds
if(outState)
rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final)
dfdxW_r *= rowSU[l] + b[l];
else
dfdxW_r *= rowSU[l];
if(outXW)
rowOutXW[i] += dfdxW_r;
if(outSU)
rowOutSU[i] += dfdxW_r;
if(outB)
atomicAdd(outB + i, dfdxW_r);
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW)
rowOutXW[k] += dfdxW_z;
if(outSU)
rowOutSU[k] += dfdxW_z;
if(outB)
atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW)
rowOutXW[l] += dfdxW_x;
if(outSU)
rowOutSU[l] += dfdxW_x * r;
if(outB)
if(final)
atomicAdd(outB + l, dfdxW_x * r);
else
atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
void GRUFastBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj,
bool final) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gGRUFastBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols,
final);
}
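// Per-row cross-entropy against a picked class index: one block per row finds
// the row maximum and the log-sum-exp via shared-memory reductions, then
// writes -log softmax(in)[pick[j]] = log(sum exp(x - max)) - (x[pick] - max).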
__global__ void gCrossEntropyPick(float* out,
const functional::Shape outShape,
const float* in,
const functional::Shape inShape,
const float* pick) {
int rows = inShape.elements() / inShape.back();
int cols = inShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += __expf(sp[id] - max);
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id == (int)pick[j]) {
out[j] = __logf(_sum[0]) - sp[id] + max;
}
}
}
}
}
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
cudaSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
gCrossEntropyPick<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
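// Gradient of the picked cross-entropy: recomputes the row-wise softmax and
// adds adj[j] * (softmax(x)[id] - 1{id == pick[j]}) to each output element.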
__global__ void gCrossEntropyPickBackward(float* out,
const functional::Shape outShape,
const float* adj,
const float* in,
const float* pick) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
float* so = out + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = __expf(sp[id] - max);
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sub = (float)(id == (int)pick[j]);
so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
}
}
}
}
}
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
gCrossEntropyPickBackward<<<blocks, threads, shared>>>(
out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
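// Euclidean norm of a tensor: reduces x*x into a small temporary tensor with
// ReduceAll and returns the square root of the reduced value read back on the
// host.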
float L2Norm(Tensor in) {
cudaSetDevice(in->getDevice().no);
int size = in->shape().elements();
int threads = std::min(MAX_THREADS, size);
int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0));
uint8_t* data;
cudaMalloc(&data, blocks * sizeof(float));
Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)),
{1, blocks},
in->getBackend()));
using namespace functional;
ReduceAll(_1 * _1, out, in);
float dataCpu = sqrtf(out->get(0));
out.reset();
cudaFree(data);
return dataCpu;
}
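// Additive (tanh) attention scores: for each row j, out[j] is the dot product
// of va with tanh(context + state), where the context and state rows are
// selected from j via the batch/time/beam index arithmetic in the kernel.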
__global__ void gAtt(float* out,
const float* va,
const float* ctx,
const float* state,
int m, // total rows (batch x time x beam)
int k, // depth
int b, // batch size
int t // time of ctx
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* vaRow = va;
const float* ctxRow = ctx + (j % (b * t)) * cols;
const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = ctxRow[id] + stateRow[id];
float ex = tanhf(z) * vaRow[id];
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
out[j] = _sum[0];
__syncthreads();
}
}
}
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = context->shape()[-1];
size_t b = context->shape()[-2];
size_t t = context->shape()[-3];
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
gAtt<<<blocks, threads, shared>>>(
out->data(), va->data(), context->data(), state->data(), m, k, b, t);
}
__global__ void gAttBack(float* gVa,
float* gContext,
float* gState,
const float* va,
const float* context,
const float* state,
const float* adj,
int m, // rows
int k, // cols
int n // batch size
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < m; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* gcRow = gContext + j * cols;
float* gsRow = gState + (j % n) * cols;
const float* cRow = context + j * cols;
const float* sRow = state + (j % n) * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = cRow[id] + sRow[id];
float t = tanhf(z);
float r = va[id] * (1.f - t * t);
gcRow[id] += r * adj[j];
gsRow[id] += r * adj[j];
atomicAdd(gVa + id, t * adj[j]);
}
}
}
}
}
void AttBack(Tensor gVa,
Tensor gContext,
Tensor gState,
Tensor va,
Tensor context,
Tensor state,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
size_t m = adj->shape().elements() / adj->shape()[-1];
size_t k = context->shape()[-1];
size_t n = context->shape()[-2];
int blocks = std::min(MAX_BLOCKS, (int)n);
int threads = std::min(MAX_THREADS, (int)k);
gAttBack<<<blocks, threads>>>(gVa->data(),
gContext->data(),
gState->data(),
va->data(),
context->data(),
state->data(),
adj->data(),
m,
k,
n);
}
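// Layer normalization over the last dimension: one block per row computes the
// mean and variance with shared-memory tree reductions and writes
// alpha * (x - mean) / sqrt(var + eps), plus beta when a beta vector is given.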
__global__ void gLNormalization(float* out,
const float* in,
const float* alpha,
const float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float _share[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = sp[id] - mean;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float t = alpha[id] * ((sp[id] - mean) / sigma);
if(beta != nullptr)
t += beta[id];
so[id] = t;
}
}
}
}
}
void LayerNormalization(Tensor out,
Tensor in,
Tensor gamma,
Tensor beta,
float eps) {
cudaSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = 2 * threads * sizeof(float);
gLNormalization<<<blocks, threads, shared>>>(out->data(),
in->data(),
gamma->data(),
beta ? beta->data() : nullptr,
rows,
cols,
eps);
}
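// Gradient of layer normalization: x_hat is recovered from the saved output y
// (and gamma/beta), the per-row sums of adj, adj*x_hat and the variance are
// reduced in shared memory, and the standard layer-norm input gradient is
// formed (clipped to +/-1000); gamma and beta gradients are accumulated with
// atomicAdd.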
__global__ void gLayerNormalizationGrad(float* gradX,
float* gradGamma,
float* gradBeta,
float* adj,
float* y,
float* x,
float* gamma,
float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float shared[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* sum_adj = shared;
float* sum_adj_x = shared + blockDim.x;
float* sum_x = shared + 2 * blockDim.x;
float* sum_sqr = shared + 3 * blockDim.x;
const float* xRow = x + j * cols;
const float* yRow = y + j * cols;
const float* adjRow = adj + j * cols;
float* gradXRow = gradX + j * cols;
sum_x[threadIdx.x] = 0.0f;
sum_adj[threadIdx.x] = 0.0f;
sum_adj_x[threadIdx.x] = 0.0f;
sum_sqr[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
sum_x[threadIdx.x] += xRow[id];
sum_adj_x[threadIdx.x]
+= adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
sum_adj[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = sum_x[0] / cols;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = xRow[id] - mean;
sum_sqr[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (sum_sqr[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float grad_x = 0.0f;
float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
grad_x += cols * adjRow[id];
grad_x -= sum_adj[0];
grad_x -= sum_adj_x[0] * x_hat;
grad_x /= (cols * sigma);
float valX = gamma[id] * grad_x;
float sign = (0.f < valX) - (valX < 0.f);
valX = fabs(valX) > 1000 ? sign * 1000 : valX;
gradXRow[id] += valX;
atomicAdd(gradGamma + id, adjRow[id] * x_hat);
if(beta) {
atomicAdd(gradBeta + id, adjRow[id]);
}
}
}
}
}
}
void LayerNormalizationGrad(Tensor gradX,
Tensor gradGamma,
Tensor gradBeta,
Tensor adj,
Tensor y,
Tensor x,
Tensor gamma,
Tensor beta,
float eps) {
cudaSetDevice(adj->getDevice().no);
int rows = y->shape().elements() / y->shape()[-1];
int cols = y->shape()[-1];
int threads = std::min(MAX_THREADS, cols);
int blocks = std::min(MAX_BLOCKS, rows);
int shared = sizeof(float) * threads * 4;
gLayerNormalizationGrad<<<blocks, threads, shared>>>(
gradX->data(),
gradGamma->data(),
(gradBeta) ? gradBeta->data() : nullptr,
adj->data(),
y->data(),
x->data(),
gamma->data(),
(beta) ? beta->data() : nullptr,
rows,
cols,
eps);
}
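// Shift the flattened tensor by `offset` elements. The template flag selects
// between the forward copy (out-of-range positions are zeroed) and the
// accumulating variant used for the gradient in ShiftGrad.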
template <bool add>
__global__ void gShift(float* out, const float* in, int length, int offset) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
if(add) {
if(index - offset >= 0 && index - offset < length)
out[index] += in[index - offset];
}
else {
if(index - offset < 0 || index - offset >= length)
out[index] = 0;
else
out[index] = in[index - offset];
}
}
}
}
void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
if(invert)
offset = -offset;
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gShift<false><<<blocks, threads>>>(out->data(), in->data(), length, offset);
}
void ShiftGrad(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
if(invert)
offset = -offset;
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gShift<true><<<blocks, threads>>>(out->data(), in->data(), length, offset);
}
__global__ void gSetSparse(float* out,
const size_t* indices,
const float* values,
int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[indices[index]] = values[index];
}
}
}
void SetSparse(float* out,
const std::vector<size_t>& indices,
const std::vector<float>& values) {
int length = indices.size();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, length * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
length * sizeof(size_t),
cudaMemcpyHostToDevice));
float* d_values;
CUDA_CHECK(cudaMalloc(&d_values, length * sizeof(float)));
CUDA_CHECK(cudaMemcpy(
d_values, values.data(), length * sizeof(float), cudaMemcpyHostToDevice));
gSetSparse<<<blocks, threads>>>(out, d_indices, d_values, length);
cudaFree(d_indices);
cudaFree(d_values);
}
/******************************************************************************/
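// Fused LSTM kernels. xW and sU each hold four precomputed affine blocks per
// row (forget, input, candidate and output gates): the cell kernels update the
// cell state (with optional masking), the output kernels apply the output gate
// to tanh(cell).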
__global__ void gLSTMCellForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float cout = gf * rowCell[i] + gi * gc;
rowOut[i] = m * cout + (1 - m) * rowCell[i];
}
}
}
}
}
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMCellForward<<<blocks, threads>>>(
out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols);
}
__global__ void gLSTMOutputForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
rowOut[i] = go * tanhf(rowCell[i]);
}
}
}
}
}
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMOutputForward<<<blocks, threads>>>(out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
rows,
cols);
}
__global__ void gLSTMCellBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += (m * gf - m + 1) * adj;
// dc/d(b_f) = dc/d(xW_f) ...
float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
if(outXW)
rowOutXW[i] += dcdxf;
if(outSU)
rowOutSU[i] += dcdxf;
if(outB)
atomicAdd(outB + i, dcdxf);
// dc/d(b_i) ...
float dcdb_i = m * gc * gi * (1 - gi) * adj;
if(outXW)
rowOutXW[k] += dcdb_i;
if(outSU)
rowOutSU[k] += dcdb_i;
if(outB)
atomicAdd(outB + k, dcdb_i);
// dc/d(b_c) ...
float dcdxc = m * gi * (1 - gc * gc) * adj;
if(outXW)
rowOutXW[l] += dcdxc;
if(outSU)
rowOutSU[l] += dcdxc;
if(outB)
atomicAdd(outB + l, dcdxc);
}
}
}
}
}
void LSTMCellBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMCellBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols);
}
__global__ void gLSTMOutputBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
float t = tanhf(rowCell[i]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += go * (1 - t * t) * adj;
// dc/d(b_o) = dc/d(xW_f) ...
float dcdxo = t * go * (1 - go) * adj;
if(outXW)
rowOutXW[k] += dcdxo;
if(outSU)
rowOutSU[k] += dcdxo;
if(outB)
atomicAdd(outB + k, dcdxo);
}
}
}
}
}
void LSTMOutputBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMOutputBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
adj->data(),
rows,
cols);
}
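// Highway connection: out = sigmoid(t) * in1 + (1 - sigmoid(t)) * in2, applied
// element-wise; the backward kernel distributes adj to both inputs and to the
// transform gate t.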
__global__ void gHighwayForward(float* out,
const float* in1,
const float* in2,
const float* t,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
}
}
}
void HighwayForward(Tensor out,
const Tensor in1,
const Tensor in2,
const Tensor t) {
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gHighwayForward<<<blocks, threads>>>(
out->data(), in1->data(), in2->data(), t->data(), length);
}
__global__ void gHighwayBackward(float* out1,
float* out2,
float* outt,
const float* in1,
const float* in2,
const float* t,
const float* adj,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out1[index] = sigma * adj[index];
out2[index] = (1.f - sigma) * adj[index];
outt[index]
= sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
}
}
}
void HighwayBackward(Tensor out1,
Tensor out2,
Tensor outt,
const Tensor in1,
const Tensor in2,
const Tensor t,
const Tensor adj) {
cudaSetDevice(out1->getDevice().no);
int length = out1->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gHighwayBackward<<<blocks, threads>>>(out1->data(),
out2->data(),
outt->data(),
in1->data(),
in2->data(),
t->data(),
adj->data(),
length);
}
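// Masked 1-D max pooling: each thread produces one output element by scanning
// a window of `width` input values multiplied by the mask; the last window of
// a row uses lastWidth to cover the remainder.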
__global__ void gMaxPoolingForward(float* out,
int outRows,
int outCols,
float* in,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
}
void PoolingWithMaskingForward(Tensor out,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = out->shape().elements();
int threads = std::min(n, MAX_THREADS);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& outShape = out->shape();
int outRows = outShape[2];
int outCols = outShape[0] * outShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
gMaxPoolingForward<<<blocks, threads>>>(out->data(),
outRows,
outCols,
in->data(),
inRows,
inCols,
mask->data(),
outShape[1],
mask->shape()[2],
width,
lastWidth);
}
__global__ void gMaxPoolingBackward(float* adj,
int adjRows,
int adjCols,
float* in,
float* adjIn,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= adjRows * adjCols)
return;
int rowId = tid / adjRows;
int colId = tid % adjRows;
float* b = in + (rowId * inCols) + (colId * width);
if(colId == adjRows - 1) {
width = lastWidth;
}
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
size_t currentMaxIdx = 0;
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
currentMaxIdx = i;
}
}
adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
+= adj[rowId + (colId * adjCols)];
}
void PoolingWithMaskingBackward(Tensor adj,
Tensor adjIn,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = adj->shape().elements();
int threads = std::min(n, 512);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& adjShape = adj->shape();
int adjRows = adjShape[2];
int adjCols = adjShape[0] * adjShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
gMaxPoolingBackward<<<blocks, threads>>>(adj->data(),
adjRows,
adjCols,
in->data(),
adjIn->data(),
inRows,
inCols,
mask->data(),
adjShape[1],
mask->shape()[2],
width,
lastWidth);
}
}
} // namespace marian
|
52f1ec2a42862243aff043ba1d9f8df37f13393c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
// Standard headers used directly in this file (<random>, malloc/free, uint8_t,
// fmaxf/fminf); the helpers_* headers may already pull these in transitively.
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <random>
#include "helpers_cuda.h"
#include "helpers_random.h"
#include "kernels_cuda.h"
#include "kernels_naive_cpu.h"
using REAL = float;
using namespace bcpnn::kernels::cuda;
using namespace bcpnn::kernels::naive_cpu;
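// Each test below follows the same pattern: draw random sizes and inputs, run
// the naive CPU reference and the GPU kernel on copies of the same data, and
// return the maximum absolute element-wise difference (0 means the two
// implementations agree exactly on this input).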
float
test_add_bias()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0, 1);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
float * matrix = (float *)malloc(n * m * sizeof(float));
float * bias = (float *)malloc(m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
matrix[i * m + j] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
bias[j] = d_entry(generator);
}
float * d_matrix;
float * d_bias;
CUDA_CALL(hipMalloc((void **)&d_matrix, n * m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_matrix, matrix, n * m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_bias, bias, m * sizeof(float), hipMemcpyHostToDevice));
naive_add_bias<REAL>(matrix, n, m, bias);
cuda_add_bias<REAL>(d_matrix, n, m, d_bias);
float * h_matrix = (float *)malloc(n * m * sizeof(float));
float * h_bias = (float *)malloc(m * sizeof(float));
CUDA_CALL(hipMemcpy(h_matrix, d_matrix, n * m * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_bias, d_bias, m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(matrix[idx] - h_matrix[idx]));
}
}
free(matrix);
free(bias);
CUDA_CALL(hipFree(d_matrix));
CUDA_CALL(hipFree(d_bias));
free(h_matrix);
free(h_bias);
return delta_max;
}
float
test_softmax()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(-100, 100);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * matrix = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
matrix[i * m + j] = d_entry(generator);
#if 0
std::cout << matrix[i * m + j] << "\t";
#endif
}
#if 0
std::cout << std::endl;
#endif
}
float * d_matrix;
CUDA_CALL(hipMalloc((void **)&d_matrix, n * m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_matrix, matrix, n * m * sizeof(float), hipMemcpyHostToDevice));
naive_softmax<REAL>(matrix, n, m);
cuda_softmax<REAL>(d_matrix, n, m);
float * h_matrix = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(hipMemcpy(h_matrix, d_matrix, n * m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(matrix[idx] - h_matrix[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(matrix);
CUDA_CALL(hipFree(d_matrix));
free(h_matrix);
return delta_max;
}
float
test_update_counters()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
  std::uniform_real_distribution<float> d_taupdt(1e-7, 0.1);  // real-valued bounds need a real distribution; an int distribution truncates both to 0
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
size_t batch_size = d_size(generator);
float taupdt = d_taupdt(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
float * inputs = (float *)malloc(batch_size * n * sizeof(float));
float * outputs = (float *)malloc(batch_size * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = d_entry(generator);
}
}
}
for (size_t b = 0; b < batch_size; ++b) {
for (size_t i = 0; i < n; ++i) {
inputs[b * n + i] = d_entry(generator);
}
for (size_t j = 0; j < m; ++j) {
outputs[b * m + j] = d_entry(generator);
}
}
float * d_Ci;
float * d_Cj;
float * d_Cij;
float * d_inputs;
float * d_outputs;
CUDA_CALL(hipMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_inputs, batch_size * n * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_outputs, batch_size * m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_Ci, Ci, n * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cj, Cj, m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cij, Cij, n * m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_inputs, inputs, batch_size * n * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_outputs, outputs, batch_size * m * sizeof(float), hipMemcpyHostToDevice));
naive_update_counters<REAL>(Ci, Cj, Cij, inputs, outputs, batch_size, n, m, taupdt);
cuda_update_counters<REAL>(d_Ci, d_Cj, d_Cij, d_inputs, d_outputs, batch_size, n, m, taupdt);
float * h_Ci = (float *)malloc(n * sizeof(float));
float * h_Cj = (float *)malloc(m * sizeof(float));
float * h_Cij = (float *)malloc(n * m * sizeof(float));
float * h_inputs = (float *)malloc(batch_size * n * sizeof(float));
float * h_outputs = (float *)malloc(batch_size * m * sizeof(float));
CUDA_CALL(hipMemcpy(h_Ci, d_Ci, n * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_Cj, d_Cj, m * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_Cij, d_Cij, n * m * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_inputs, d_inputs, batch_size * n * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_outputs, d_outputs, batch_size * m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
delta_max = fmaxf(delta_max, fabsf(Ci[i] - h_Ci[i]));
}
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(Cj[j] - h_Cj[j]));
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(Cij[idx] - h_Cij[idx]));
}
}
for (size_t b = 0; b < batch_size; ++b) {
for (size_t i = 0; i < n; ++i) {
size_t idx = b * n + i;
delta_max = fmaxf(delta_max, fabsf(inputs[idx] - h_inputs[idx]));
}
for (size_t j = 0; j < m; ++j) {
size_t idx = b * m + j;
delta_max = fmaxf(delta_max, fabsf(outputs[idx] - h_outputs[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(Ci);
free(Cj);
free(Cij);
free(inputs);
free(outputs);
CUDA_CALL(hipFree(d_Ci));
CUDA_CALL(hipFree(d_Cj));
CUDA_CALL(hipFree(d_Cij));
CUDA_CALL(hipFree(d_inputs));
CUDA_CALL(hipFree(d_outputs));
free(h_Ci);
free(h_Cj);
free(h_Cij);
free(h_inputs);
free(h_outputs);
return delta_max;
}
float
test_update_weights()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
  std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);  // real-valued bounds need a real distribution; an int distribution truncates both to 0
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "Size: " << n << ", " << m << std::endl;
float * weights = (float *)malloc(n * m * sizeof(float));
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = d_entry(generator);
}
}
}
float * d_weights;
float * d_Ci;
float * d_Cj;
float * d_Cij;
CUDA_CALL(hipMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_Ci, Ci, n * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cj, Cj, m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cij, Cij, n * m * sizeof(float), hipMemcpyHostToDevice));
naive_update_weights<REAL>(weights, Ci, Cj, Cij, cthr, n, m);
cuda_update_weights<REAL>(d_weights, d_Ci, d_Cj, d_Cij, cthr, n, m);
float * h_weights = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(hipMemcpy(h_weights, d_weights, n * m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(weights[idx] - h_weights[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(weights);
free(Ci);
free(Cj);
free(Cij);
CUDA_CALL(hipFree(d_weights));
CUDA_CALL(hipFree(d_Ci));
CUDA_CALL(hipFree(d_Cj));
CUDA_CALL(hipFree(d_Cij));
free(h_weights);
return delta_max;
}
float
test_update_bias()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
  std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);  // real-valued bounds need a real distribution; an int distribution truncates both to 0
bcpnn::helpers::random::seed_generator(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "Size: " << n << ", " << m << std::endl;
float * bias = (float *)malloc(m * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
float * d_bias;
float * d_Cj;
CUDA_CALL(hipMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_Cj, Cj, m * sizeof(float), hipMemcpyHostToDevice));
naive_update_bias<REAL>(bias, Cj, cthr, m);
cuda_update_bias<REAL>(d_bias, d_Cj, cthr, m);
float * h_bias = (float *)malloc(m * sizeof(float));
CUDA_CALL(hipMemcpy(h_bias, d_bias, m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(bias[j] - h_bias[j]));
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(bias);
free(Cj);
CUDA_CALL(hipFree(d_bias));
CUDA_CALL(hipFree(d_Cj));
free(h_bias);
return delta_max;
}
float
test_update_bias_regularized()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_real_distribution<float> _d_kbi(-10, 10);
std::uniform_real_distribution<float> d_khalf(-1000, 0);
std::uniform_real_distribution<float> d_pmin(0.01, 0.5);
std::uniform_int_distribution<int> d_zero(0, 9);
  std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);  // real-valued bounds need a real distribution; an int distribution truncates both to 0
bcpnn::helpers::random::seed_generator(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
float khalf = d_khalf(generator);
float pmin = d_pmin(generator);
float taubdt = d_taupdt(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * bias = (float *)malloc(m * sizeof(float));
float * kbi = (float *)malloc(m * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
kbi[j] = _d_kbi(generator);
}
float * d_bias;
float * d_kbi;
float * d_Cj;
CUDA_CALL(hipMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_kbi, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_kbi, kbi, m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cj, Cj, m * sizeof(float), hipMemcpyHostToDevice));
naive_update_bias_regularized<REAL>(bias, kbi, Cj, cthr, khalf, pmin, taubdt, m);
cuda_update_bias_regularized<REAL>(d_bias, d_kbi, d_Cj, cthr, khalf, pmin, taubdt, m);
float * h_bias = (float *)malloc(m * sizeof(float));
float * h_kbi = (float *)malloc(m * sizeof(float));
CUDA_CALL(hipMemcpy(h_bias, d_bias, m * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_kbi, d_kbi, m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(bias[j] - h_bias[j]));
delta_max = fmaxf(delta_max, fabsf(kbi[j] - h_kbi[j]));
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(bias);
free(kbi);
free(Cj);
CUDA_CALL(hipFree(d_bias));
CUDA_CALL(hipFree(d_kbi));
CUDA_CALL(hipFree(d_Cj));
free(h_bias);
free(h_kbi);
return delta_max;
}
float
test_update_mask()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_counter_entry(0.001, 0.999);
std::uniform_int_distribution<int> d_mask_entry(0, 4);
std::uniform_real_distribution<float> d_weight_entry(-10, 10);
std::uniform_int_distribution<int> d_zero(0, 9);
std::uniform_real_distribution<float> d_taupdt(1e-7, 0.1);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t hypercolumns = d_size(generator);
std::uniform_int_distribution<int> d_minicolumns(1,1024/hypercolumns);
std::uniform_int_distribution<int> d_hypercolumn(0,hypercolumns-1);
size_t minicolumns = d_minicolumns(generator);
size_t h = d_hypercolumn(generator);
size_t m = hypercolumns * minicolumns;
size_t batch_size = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "cthr: " << cthr << std::endl;
//std::cout << "Size: " << n << ", " << m << std::endl;
uint8_t * wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
float * weights = (float *)malloc(n * m * sizeof(float));
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
wmask[i * hypercolumns + h] = (d_mask_entry(generator) == 0);
}
}
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_counter_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_counter_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = fminf(0.9 * Ci[i], fminf(0.9 * Cj[j], d_counter_entry(generator)));
}
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
weights[i * m + j] = d_weight_entry(generator);
}
}
uint8_t * d_wmask;
float * d_weights;
float * d_Ci;
float * d_Cj;
float * d_Cij;
CUDA_CALL(hipMalloc((void **)&d_wmask, n * hypercolumns * sizeof(uint8_t)));
CUDA_CALL(hipMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_wmask, wmask, n * hypercolumns * sizeof(uint8_t), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_weights, weights, n * m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Ci, Ci, n * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cj, Cj, m * sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_Cij, Cij, n * m * sizeof(float), hipMemcpyHostToDevice));
naive_update_mask<REAL>(wmask, weights, Ci, Cj, Cij, cthr, n, m, h, hypercolumns, minicolumns, 1);
cuda_update_mask<REAL>(d_wmask, d_weights, d_Ci, d_Cj, d_Cij, cthr, n, m, h, hypercolumns, minicolumns, 1);
uint8_t * h_wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
CUDA_CALL(hipMemcpy(h_wmask, d_wmask, n * hypercolumns * sizeof(uint8_t), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
delta_max = fmaxf(delta_max, fabsf(wmask[idx] - h_wmask[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)wmask[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)h_wmask[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(wmask);
free(weights);
free(Ci);
free(Cj);
free(Cij);
CUDA_CALL(hipFree(d_wmask));
CUDA_CALL(hipFree(d_weights));
CUDA_CALL(hipFree(d_Ci));
CUDA_CALL(hipFree(d_Cj));
CUDA_CALL(hipFree(d_Cij));
free(h_wmask);
return delta_max;
}
float
test_apply_mask()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_int_distribution<int> d_mask_entry(0, 4);
std::uniform_real_distribution<float> d_weight_entry(-10, 10);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t hypercolumns = d_size(generator);
std::uniform_int_distribution<int> d_minicolumns(1,1024/hypercolumns);
size_t minicolumns = d_minicolumns(generator);
size_t m = hypercolumns * minicolumns;
uint8_t * wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
float * weights = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
wmask[i * hypercolumns + h] = (d_mask_entry(generator) == 0);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
weights[i * m + j] = d_weight_entry(generator);
}
}
uint8_t * d_wmask;
float * d_weights;
CUDA_CALL(hipMalloc((void **)&d_wmask, n * hypercolumns * sizeof(uint8_t)));
CUDA_CALL(hipMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(hipMemcpy(d_wmask, wmask, n * hypercolumns * sizeof(uint8_t), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_weights, weights, n * m * sizeof(float), hipMemcpyHostToDevice));
naive_apply_mask<REAL>(weights, wmask,n, m, hypercolumns, minicolumns);
cuda_apply_mask<REAL>(d_weights, d_wmask, n, m, hypercolumns, minicolumns);
float * h_weights = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(hipMemcpy(h_weights, d_weights, n * m * sizeof(float), hipMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(weights[idx] - h_weights[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)wmask[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)h_wmask[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(wmask);
free(weights);
CUDA_CALL(hipFree(d_wmask));
CUDA_CALL(hipFree(d_weights));
free(h_weights);
return delta_max;
}
| 52f1ec2a42862243aff043ba1d9f8df37f13393c.cu | #include <cuda.h>
#include "helpers_cuda.h"
#include "helpers_random.h"
#include "kernels_cuda.h"
#include "kernels_naive_cpu.h"
using REAL = float;
using namespace bcpnn::kernels::cuda;
using namespace bcpnn::kernels::naive_cpu;
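// Each test below follows the same pattern: draw random sizes and inputs, run
// the naive CPU reference and the GPU kernel on copies of the same data, and
// return the maximum absolute element-wise difference (0 means the two
// implementations agree exactly on this input).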
float
test_add_bias()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0, 1);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
float * matrix = (float *)malloc(n * m * sizeof(float));
float * bias = (float *)malloc(m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
matrix[i * m + j] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
bias[j] = d_entry(generator);
}
float * d_matrix;
float * d_bias;
CUDA_CALL(cudaMalloc((void **)&d_matrix, n * m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_matrix, matrix, n * m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_bias, bias, m * sizeof(float), cudaMemcpyHostToDevice));
naive_add_bias<REAL>(matrix, n, m, bias);
cuda_add_bias<REAL>(d_matrix, n, m, d_bias);
float * h_matrix = (float *)malloc(n * m * sizeof(float));
float * h_bias = (float *)malloc(m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_matrix, d_matrix, n * m * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_bias, d_bias, m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(matrix[idx] - h_matrix[idx]));
}
}
free(matrix);
free(bias);
CUDA_CALL(cudaFree(d_matrix));
CUDA_CALL(cudaFree(d_bias));
free(h_matrix);
free(h_bias);
return delta_max;
}
float
test_softmax()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(-100, 100);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * matrix = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
matrix[i * m + j] = d_entry(generator);
#if 0
std::cout << matrix[i * m + j] << "\t";
#endif
}
#if 0
std::cout << std::endl;
#endif
}
float * d_matrix;
CUDA_CALL(cudaMalloc((void **)&d_matrix, n * m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_matrix, matrix, n * m * sizeof(float), cudaMemcpyHostToDevice));
naive_softmax<REAL>(matrix, n, m);
cuda_softmax<REAL>(d_matrix, n, m);
float * h_matrix = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_matrix, d_matrix, n * m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(matrix[idx] - h_matrix[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(matrix);
CUDA_CALL(cudaFree(d_matrix));
free(h_matrix);
return delta_max;
}
float
test_update_counters()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
  std::uniform_real_distribution<float> d_taupdt(1e-7, 0.1);  // real-valued bounds need a real distribution; an int distribution truncates both to 0
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
size_t batch_size = d_size(generator);
float taupdt = d_taupdt(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
float * inputs = (float *)malloc(batch_size * n * sizeof(float));
float * outputs = (float *)malloc(batch_size * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = d_entry(generator);
}
}
}
for (size_t b = 0; b < batch_size; ++b) {
for (size_t i = 0; i < n; ++i) {
inputs[b * n + i] = d_entry(generator);
}
for (size_t j = 0; j < m; ++j) {
outputs[b * m + j] = d_entry(generator);
}
}
float * d_Ci;
float * d_Cj;
float * d_Cij;
float * d_inputs;
float * d_outputs;
CUDA_CALL(cudaMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_inputs, batch_size * n * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_outputs, batch_size * m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_Ci, Ci, n * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cj, Cj, m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cij, Cij, n * m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_inputs, inputs, batch_size * n * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_outputs, outputs, batch_size * m * sizeof(float), cudaMemcpyHostToDevice));
naive_update_counters<REAL>(Ci, Cj, Cij, inputs, outputs, batch_size, n, m, taupdt);
cuda_update_counters<REAL>(d_Ci, d_Cj, d_Cij, d_inputs, d_outputs, batch_size, n, m, taupdt);
float * h_Ci = (float *)malloc(n * sizeof(float));
float * h_Cj = (float *)malloc(m * sizeof(float));
float * h_Cij = (float *)malloc(n * m * sizeof(float));
float * h_inputs = (float *)malloc(batch_size * n * sizeof(float));
float * h_outputs = (float *)malloc(batch_size * m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_Ci, d_Ci, n * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_Cj, d_Cj, m * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_Cij, d_Cij, n * m * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_inputs, d_inputs, batch_size * n * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_outputs, d_outputs, batch_size * m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
delta_max = fmaxf(delta_max, fabsf(Ci[i] - h_Ci[i]));
}
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(Cj[j] - h_Cj[j]));
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(Cij[idx] - h_Cij[idx]));
}
}
for (size_t b = 0; b < batch_size; ++b) {
for (size_t i = 0; i < n; ++i) {
size_t idx = b * n + i;
delta_max = fmaxf(delta_max, fabsf(inputs[idx] - h_inputs[idx]));
}
for (size_t j = 0; j < m; ++j) {
size_t idx = b * m + j;
delta_max = fmaxf(delta_max, fabsf(outputs[idx] - h_outputs[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(Ci);
free(Cj);
free(Cij);
free(inputs);
free(outputs);
CUDA_CALL(cudaFree(d_Ci));
CUDA_CALL(cudaFree(d_Cj));
CUDA_CALL(cudaFree(d_Cij));
CUDA_CALL(cudaFree(d_inputs));
CUDA_CALL(cudaFree(d_outputs));
free(h_Ci);
free(h_Cj);
free(h_Cij);
free(h_inputs);
free(h_outputs);
return delta_max;
}
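/* Compares naive_update_weights with cuda_update_weights on random counters and
 * returns the largest absolute difference between the resulting weight matrices. */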
float
test_update_weights()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "Size: " << n << ", " << m << std::endl;
float * weights = (float *)malloc(n * m * sizeof(float));
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = d_entry(generator);
}
}
}
float * d_weights;
float * d_Ci;
float * d_Cj;
float * d_Cij;
CUDA_CALL(cudaMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_Ci, Ci, n * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cj, Cj, m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cij, Cij, n * m * sizeof(float), cudaMemcpyHostToDevice));
naive_update_weights<REAL>(weights, Ci, Cj, Cij, cthr, n, m);
cuda_update_weights<REAL>(d_weights, d_Ci, d_Cj, d_Cij, cthr, n, m);
float * h_weights = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_weights, d_weights, n * m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(weights[idx] - h_weights[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(weights);
free(Ci);
free(Cj);
free(Cij);
CUDA_CALL(cudaFree(d_weights));
CUDA_CALL(cudaFree(d_Ci));
CUDA_CALL(cudaFree(d_Cj));
CUDA_CALL(cudaFree(d_Cij));
free(h_weights);
return delta_max;
}
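/* Compares naive_update_bias with cuda_update_bias on a random Cj vector and
 * returns the largest absolute difference between the resulting bias vectors. */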
float
test_update_bias()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_int_distribution<int> d_zero(0, 9);
std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);
bcpnn::helpers::random::seed_generator(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "Size: " << n << ", " << m << std::endl;
float * bias = (float *)malloc(m * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
}
float * d_bias;
float * d_Cj;
CUDA_CALL(cudaMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_Cj, Cj, m * sizeof(float), cudaMemcpyHostToDevice));
naive_update_bias<REAL>(bias, Cj, cthr, m);
cuda_update_bias<REAL>(d_bias, d_Cj, cthr, m);
float * h_bias = (float *)malloc(m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_bias, d_bias, m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(bias[j] - h_bias[j]));
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(bias);
free(Cj);
CUDA_CALL(cudaFree(d_bias));
CUDA_CALL(cudaFree(d_Cj));
free(h_bias);
return delta_max;
}
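/* Same comparison as test_update_bias, but for the regularized variant that also
 * updates the per-unit kbi term; both bias and kbi enter the reported maximum. */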
float
test_update_bias_regularized()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_entry(0.001, 1);
std::uniform_real_distribution<float> _d_kbi(-10, 10);
std::uniform_real_distribution<float> d_khalf(-1000, 0);
std::uniform_real_distribution<float> d_pmin(0.01, 0.5);
std::uniform_int_distribution<int> d_zero(0, 9);
std::uniform_real_distribution<float> d_taupdt(1e-3, 0.1);
bcpnn::helpers::random::seed_generator(generator);
size_t m = d_size(generator);
float cthr = d_taupdt(generator)/2;
float khalf = d_khalf(generator);
float pmin = d_pmin(generator);
float taubdt = d_taupdt(generator);
//std::cout << "Size: " << n << ", " << m << std::endl;
float * bias = (float *)malloc(m * sizeof(float));
float * kbi = (float *)malloc(m * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_entry(generator);
}
kbi[j] = _d_kbi(generator);
}
float * d_bias;
float * d_kbi;
float * d_Cj;
CUDA_CALL(cudaMalloc((void **)&d_bias, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_kbi, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_kbi, kbi, m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cj, Cj, m * sizeof(float), cudaMemcpyHostToDevice));
naive_update_bias_regularized<REAL>(bias, kbi, Cj, cthr, khalf, pmin, taubdt, m);
cuda_update_bias_regularized<REAL>(d_bias, d_kbi, d_Cj, cthr, khalf, pmin, taubdt, m);
float * h_bias = (float *)malloc(m * sizeof(float));
float * h_kbi = (float *)malloc(m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_bias, d_bias, m * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_kbi, d_kbi, m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t j = 0; j < m; ++j) {
delta_max = fmaxf(delta_max, fabsf(bias[j] - h_bias[j]));
delta_max = fmaxf(delta_max, fabsf(kbi[j] - h_kbi[j]));
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << matrix[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
std::cout << h_matrix[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(bias);
free(kbi);
free(Cj);
CUDA_CALL(cudaFree(d_bias));
CUDA_CALL(cudaFree(d_kbi));
CUDA_CALL(cudaFree(d_Cj));
free(h_bias);
free(h_kbi);
return delta_max;
}
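/* Compares naive_update_mask with cuda_update_mask for one randomly chosen
 * hypercolumn h: both recompute the connection mask wmask from the counters,
 * and the test returns the largest absolute difference between the two masks. */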
float
test_update_mask()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_real_distribution<float> d_counter_entry(0.001, 0.999);
std::uniform_int_distribution<int> d_mask_entry(0, 4);
std::uniform_real_distribution<float> d_weight_entry(-10, 10);
std::uniform_int_distribution<int> d_zero(0, 9);
std::uniform_real_distribution<float> d_taupdt(1e-7, 0.1);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t hypercolumns = d_size(generator);
std::uniform_int_distribution<int> d_minicolumns(1,1024/hypercolumns);
std::uniform_int_distribution<int> d_hypercolumn(0,hypercolumns-1);
size_t minicolumns = d_minicolumns(generator);
size_t h = d_hypercolumn(generator);
size_t m = hypercolumns * minicolumns;
size_t batch_size = d_size(generator);
float cthr = d_taupdt(generator)/2;
//std::cout << "cthr: " << cthr << std::endl;
//std::cout << "Size: " << n << ", " << m << std::endl;
uint8_t * wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
float * weights = (float *)malloc(n * m * sizeof(float));
float * Ci = (float *)malloc(n * sizeof(float));
float * Cj = (float *)malloc(m * sizeof(float));
float * Cij = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
wmask[i * hypercolumns + h] = (d_mask_entry(generator) == 0);
}
}
for (size_t i = 0; i < n; ++i) {
if (d_zero(generator) == 0) {
Ci[i] = 0;
} else {
Ci[i] = d_counter_entry(generator);
}
}
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cj[j] = 0;
} else {
Cj[j] = d_counter_entry(generator);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
if (d_zero(generator) == 0) {
Cij[i * m + j] = 0;
} else {
Cij[i * m + j] = fminf(0.9 * Ci[i], fminf(0.9 * Cj[j], d_counter_entry(generator)));
}
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
weights[i * m + j] = d_weight_entry(generator);
}
}
uint8_t * d_wmask;
float * d_weights;
float * d_Ci;
float * d_Cj;
float * d_Cij;
CUDA_CALL(cudaMalloc((void **)&d_wmask, n * hypercolumns * sizeof(uint8_t)));
CUDA_CALL(cudaMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Ci, n * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cj, m * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&d_Cij, n * m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_wmask, wmask, n * hypercolumns * sizeof(uint8_t), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_weights, weights, n * m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Ci, Ci, n * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cj, Cj, m * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_Cij, Cij, n * m * sizeof(float), cudaMemcpyHostToDevice));
naive_update_mask<REAL>(wmask, weights, Ci, Cj, Cij, cthr, n, m, h, hypercolumns, minicolumns, 1);
cuda_update_mask<REAL>(d_wmask, d_weights, d_Ci, d_Cj, d_Cij, cthr, n, m, h, hypercolumns, minicolumns, 1);
uint8_t * h_wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
CUDA_CALL(cudaMemcpy(h_wmask, d_wmask, n * hypercolumns * sizeof(uint8_t), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
delta_max = fmaxf(delta_max, fabsf(wmask[idx] - h_wmask[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)wmask[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)h_wmask[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(wmask);
free(weights);
free(Ci);
free(Cj);
free(Cij);
CUDA_CALL(cudaFree(d_wmask));
CUDA_CALL(cudaFree(d_weights));
CUDA_CALL(cudaFree(d_Ci));
CUDA_CALL(cudaFree(d_Cj));
CUDA_CALL(cudaFree(d_Cij));
free(h_wmask);
return delta_max;
}
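/* Compares naive_apply_mask with cuda_apply_mask: both apply the per-hypercolumn
 * wmask to the weight matrix, and the test returns the largest absolute
 * difference between the resulting weights. */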
float
test_apply_mask()
{
std::default_random_engine generator;
std::uniform_int_distribution<int> d_size(1,1024);
std::uniform_int_distribution<int> d_mask_entry(0, 4);
std::uniform_real_distribution<float> d_weight_entry(-10, 10);
bcpnn::helpers::random::seed_generator(generator);
size_t n = d_size(generator);
size_t hypercolumns = d_size(generator);
std::uniform_int_distribution<int> d_minicolumns(1,1024/hypercolumns);
size_t minicolumns = d_minicolumns(generator);
size_t m = hypercolumns * minicolumns;
uint8_t * wmask = (uint8_t *)malloc(n * hypercolumns * sizeof(uint8_t));
float * weights = (float *)malloc(n * m * sizeof(float));
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
wmask[i * hypercolumns + h] = (d_mask_entry(generator) == 0);
}
}
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
weights[i * m + j] = d_weight_entry(generator);
}
}
uint8_t * d_wmask;
float * d_weights;
CUDA_CALL(cudaMalloc((void **)&d_wmask, n * hypercolumns * sizeof(uint8_t)));
CUDA_CALL(cudaMalloc((void **)&d_weights, n * m * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_wmask, wmask, n * hypercolumns * sizeof(uint8_t), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_weights, weights, n * m * sizeof(float), cudaMemcpyHostToDevice));
naive_apply_mask<REAL>(weights, wmask, n, m, hypercolumns, minicolumns);
cuda_apply_mask<REAL>(d_weights, d_wmask, n, m, hypercolumns, minicolumns);
float * h_weights = (float *)malloc(n * m * sizeof(float));
CUDA_CALL(cudaMemcpy(h_weights, d_weights, n * m * sizeof(float), cudaMemcpyDeviceToHost));
float delta_max = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < m; ++j) {
size_t idx = i * m + j;
delta_max = fmaxf(delta_max, fabsf(weights[idx] - h_weights[idx]));
}
}
#if 0
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)wmask[idx] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << std::endl;
for (size_t i = 0; i < n; ++i) {
for (size_t h = 0; h < hypercolumns; ++h) {
size_t idx = i * hypercolumns + h;
std::cout << (int)h_wmask[idx] << "\t";
}
std::cout << std::endl;
}
#endif
free(wmask);
free(weights);
CUDA_CALL(cudaFree(d_wmask));
CUDA_CALL(cudaFree(d_weights));
free(h_weights);
return delta_max;
}
|
13d808b7aa714122310cba8edc041237f732f72c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <dcgn/dcgn.h>
#include <dcgn/CUDAFunctions.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <algorithm>
#include <cerrno>
#include <sched.h>
#define CHECK_ERROR() \
{ \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) \
{ \
fprintf(stderr, "%s.%s.%d: %s.\n", __FILE__, __FUNCTION__, __LINE__, hipGetErrorString(err)); \
fflush(stderr); \
exit(1); \
} \
} \
typedef struct _MandelbrotInfo
{
int width, height, maxRows, maxIters;
float xMin, xMax, yMin, yMax;
} MandelbrotInfo;
typedef struct _CommPacket
{
int startRow, endRow;
} CommPacket;
void convertHSBtoRGB(const float & hue, const float & brightness, unsigned char * pixel)
{
unsigned char r, g, b;
const float saturation = 1.0f;
float h = (hue - floor(hue)) * 6.0f;
float f = h - floor(h);
float p = brightness * (1.0f - saturation);
float q = brightness * (1.0f - saturation * f);
float t = brightness * (1.0f - (saturation * (1.0f - f)));
switch (static_cast<int>(h))
{
case 0:
r = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
g = static_cast<unsigned char>(t * 255.0f + 0.5f);
b = static_cast<unsigned char>(p * 255.0f + 0.5f);
break;
case 1:
r = static_cast<unsigned char>(q * 255.0f + 0.5f);
g = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
b = static_cast<unsigned char>(p * 255.0f + 0.5f);
break;
case 2:
r = static_cast<unsigned char>(p * 255.0f + 0.5f);
g = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
b = static_cast<unsigned char>(t * 255.0f + 0.5f);
break;
case 3:
r = static_cast<unsigned char>(p * 255.0f + 0.5f);
g = static_cast<unsigned char>(q * 255.0f + 0.5f);
b = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
break;
case 4:
r = static_cast<unsigned char>(t * 255.0f + 0.5f);
g = static_cast<unsigned char>(p * 255.0f + 0.5f);
b = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
break;
case 5:
r = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
g = static_cast<unsigned char>(p * 255.0f + 0.5f);
b = static_cast<unsigned char>(q * 255.0f + 0.5f);
break;
}
pixel[0] = r;
pixel[1] = g;
pixel[2] = b;
}
void readInputFile(const char * const input, MandelbrotInfo & minfo)
{
FILE * fp = fopen(input, "r");
char line[2048];
if (!fp)
{
fprintf(stderr, "Error, couldn't open file '%s' for reading.\n", input);
fflush(stderr);
exit(1);
}
while (fgets(line, 2047, fp))
{
char * ptr = line;
while (*ptr && *ptr <= ' ') ++ptr;
if (*ptr == '#') continue;
char * end = ptr + strlen(ptr) - 1;
while (end >= ptr && *end <= ' ') --end;
*(end + 1) = 0;
char var[1024];
int ival;
float fval;
sscanf(ptr, "%s = %d", var, &ival);
sscanf(ptr, "%s = %f", var, &fval);
if (strcmp(var, "width") == 0) minfo.width = ival;
else if (strcmp(var, "height") == 0) minfo.height = ival;
else if (strcmp(var, "maxRows") == 0) minfo.maxRows = ival;
else if (strcmp(var, "maxIters") == 0) minfo.maxIters = ival;
else if (strcmp(var, "xmin") == 0) minfo.xMin = fval;
else if (strcmp(var, "xmax") == 0) minfo.xMax = fval;
else if (strcmp(var, "ymin") == 0) minfo.yMin = fval;
else if (strcmp(var, "ymax") == 0) minfo.yMax = fval;
else
{
fprintf(stderr, "Warning, skipping invalid variable in input file (%s).\n", var);
fflush(stderr);
}
}
fclose(fp);
}
void storeRows(FILE * outfp, const int startOfImage, const int source, const int size,
const int startRow, const int endRow, const MandelbrotInfo & minfo,
const int * const pixels, unsigned char * const rgb)
{
const float hue = static_cast<float>(source) / static_cast<float>(size);
unsigned char * pixel = rgb;
for (int row = startRow; row < endRow; ++row)
{
const int * rowp = pixels + (row - startRow) * minfo.width;
for (int i = 0; i < minfo.width; ++i)
{
float t = 0.0f;
if (rowp[i] == 0)
{
t = 0.0f;
}
else if (rowp[i] < 16)
{
t = 0.75f * (static_cast<float>(rowp[i]) - 1.0f) / 14.0f;
}
else
{
t = 0.75f + 0.25f * (static_cast<float>(rowp[i]) - 16.0f) / static_cast<float>(minfo.maxIters - 16);
}
convertHSBtoRGB(hue, t, pixel);
pixel += 3;
}
}
fseek(outfp, startOfImage + sizeof(unsigned char) * minfo.width * startRow * 3, SEEK_SET);
fwrite(rgb, sizeof(unsigned char) * minfo.width * (endRow - startRow) * 3, 1, outfp);
}
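// Escape-time iteration for one image row: threads of a block stride over the
// columns and store the final iteration count of each pixel.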
__device__ void scanRow(const MandelbrotInfo minfo, const int row, int * pixels)
{
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter = 0;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
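// Software grid-wide barrier: each block raises its flag, block 0 waits for all
// flags and then clears them, releasing every block to continue.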
__device__ void __syncblocks(volatile int * syncblocksArr)
{
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
syncblocksArr[blockIdx.x] = 1;
if (blockIdx.x == 0)
{
for (int i = 1; i < gridDim.x; ++i)
{
while (syncblocksArr[i] == 0) { }
}
for (int i = 0; i < gridDim.x; ++i)
{
syncblocksArr[i] = 0;
}
}
while (syncblocksArr[blockIdx.x] == 1) { }
}
__syncthreads();
}
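// GPU worker kernel: thread 0 of block 0 exchanges work packets with the master
// over DCGN, all blocks render the assigned rows, and the loop ends when an
// empty packet (startRow >= endRow) is received.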
__global__ void doGPUSlave(int * pixels, MandelbrotInfo * pMinfo, CommPacket * packet, int * sbarr, const dcgn::GPUInitRequest libParam)
{
dcgn::gpu::init(libParam);
if (blockIdx.x == 0 && threadIdx.x == 0)
{
dcgn::gpu::broadcast(0, 0, pMinfo, sizeof(MandelbrotInfo));
}
__syncblocks(sbarr);
MandelbrotInfo minfo = *pMinfo;
packet->startRow = packet->endRow = -1;
if (blockIdx.x == 0 && threadIdx.x == 0) dcgn::gpu::barrier(0);
do
{
dcgn::CommStatus stat;
if (blockIdx.x == 0 && threadIdx.x == 0)
{
dcgn::gpu::send(0, 0, packet, sizeof(CommPacket));
if (packet->startRow < packet->endRow)
{
dcgn::gpu::send(0, 0, pixels, sizeof(int) * minfo.width * (packet->endRow - packet->startRow));
}
dcgn::gpu::recv(0, 0, packet, sizeof(CommPacket), &stat);
}
__syncblocks(sbarr); // wait for packet to arrive.
const int startRow = packet->startRow, endRow = packet->endRow;
int row = startRow + blockIdx.x;
while (row < endRow)
{
scanRow(minfo, row, pixels + minfo.width * (row - startRow));
row += gridDim.x;
}
__syncblocks(sbarr); // wait for work to finish, because when we loop we send the results back.
}
while (packet->startRow < packet->endRow);
if (blockIdx.x == 0 && threadIdx.x == 0) dcgn::gpu::barrier(0);
}
__host__ void doSlave(void * dbgInfo, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, hipStream_t * const stream)
{
int * pixels, * sbarr;
MandelbrotInfo * minfo;
CommPacket * packet;
hipMalloc(reinterpret_cast<void ** >(&sbarr), sizeof(int) * gridSize.x); CHECK_ERROR();
hipMalloc(reinterpret_cast<void ** >(&packet), sizeof(CommPacket)); CHECK_ERROR();
hipMalloc(reinterpret_cast<void ** >(&pixels), sizeof(int) * 1048576 * 100); CHECK_ERROR();
hipMalloc(reinterpret_cast<void ** >(&minfo), sizeof(MandelbrotInfo)); CHECK_ERROR();
hipMemset(sbarr, 0, sizeof(int) * gridSize.x); CHECK_ERROR();
hipMemset(minfo, 0, sizeof(MandelbrotInfo)); CHECK_ERROR();
hipLaunchKernelGGL(( doGPUSlave), dim3(gridSize), dim3(blockSize), sharedMemSize, *stream, pixels, minfo, packet, sbarr, libParam); CHECK_ERROR();
}
__host__ void gpuDtor(void * dbgInfo)
{
}
__host__ void doMaster(void * arg)
{
CommPacket packet;
int size, startOfImage, nextRow = 0;
MandelbrotInfo minfo;
char ** argv = reinterpret_cast<char ** >(arg);
readInputFile(argv[1], minfo);
FILE * outfp = fopen(argv[2], "wb");
if (!outfp)
{
fprintf(stderr, "Error, couldn't open %s for writing.\n", argv[2]);
fflush(stderr);
dcgn::abort(dcgn::DCGN_ERROR_ABORTED);
}
dcgn::broadcast(0, &minfo, sizeof(minfo));
size = dcgn::getSize();
unsigned char * rgb = new unsigned char[3 * minfo.width * minfo.maxRows];
int * pixels = new int[minfo.width * minfo.maxRows];
int numKilled = 1;
fprintf(outfp, "P6\n%d %d\n255\n%n", minfo.width, minfo.height, &startOfImage);
dcgn::barrier();
double timer = dcgn::wallTime();
int lastRow = -10000;
while (nextRow < minfo.height)
{
if (nextRow - lastRow >= 1000)
{
printf("%10d / %10d\r", nextRow, minfo.height); fflush(stdout);
lastRow = nextRow;
}
dcgn::CommStatus stat;
dcgn::recv(dcgn::ANY_SOURCE, &packet, sizeof(packet), &stat);
if (packet.endRow > packet.startRow)
{
dcgn::recv(stat.src, pixels, sizeof(int) * minfo.width * (packet.endRow - packet.startRow), &stat);
storeRows(outfp, startOfImage, stat.src - 1, size - 1, packet.startRow, packet.endRow, minfo, pixels, rgb);
}
packet.startRow = nextRow;
packet.endRow = ::min(packet.startRow + minfo.maxRows, minfo.height);
nextRow = packet.endRow;
dcgn::send(stat.src, &packet, sizeof(packet));
}
printf(" \r");
while (numKilled < size)
{
printf("%3d / %3d\r", numKilled, size); fflush(stdout);
dcgn::CommStatus stat;
dcgn::recv(dcgn::ANY_SOURCE, &packet, sizeof(packet), &stat);
if (packet.endRow > packet.startRow)
{
dcgn::recv(stat.src, pixels, sizeof(int) * minfo.width * (packet.endRow - packet.startRow), &stat);
storeRows(outfp, startOfImage, stat.src - 1, size - 1, packet.startRow, packet.endRow, minfo, pixels, rgb);
}
packet.startRow = packet.endRow = -1;
++numKilled;
dcgn::send(stat.src, &packet, sizeof(packet));
}
printf(" \r"); fflush(stdout);
dcgn::barrier();
timer = dcgn::wallTime() - timer;
fprintf(stderr, "done, took %f seconds.\n", timer);
fclose(outfp);
delete [] rgb;
delete [] pixels;
}
int main(int argc, char ** argv)
{
int gpus[] = { 0, 1, -1 };
uint3 gs = { 12, 1, 1 }, bs = { 160, 1, 1 };
dcgn::init(&argc, &argv);
dcgn::initComm(-1);
dcgn::initGPU(gpus, 1, 0);
dcgn::initCPU(dcgn::getNodeID() == 0 ? 1 : 0);
dcgn::start();
if (argc != 3)
{
if (dcgn::getNodeID() == 0)
{
fprintf(stderr, "Usage: %s <input_file> <output_file>\n", argv[0]);
fflush(stderr);
}
dcgn::finalize();
return 1;
}
void * gpuMem1, * gpuMem2;
if (dcgn::getNodeID() == 0) dcgn::launchCPUKernel(0, doMaster, argv);
dcgn::launchGPUKernel(0, doSlave, gpuDtor, &gpuMem1, gs, bs);
dcgn::launchGPUKernel(1, doSlave, gpuDtor, &gpuMem2, gs, bs);
dcgn::finalize();
return 0;
}
| 13d808b7aa714122310cba8edc041237f732f72c.cu | #include <dcgn/dcgn.h>
#include <dcgn/CUDAFunctions.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <algorithm>
#include <cerrno>
#include <sched.h>
#define CHECK_ERROR() \
{ \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) \
{ \
fprintf(stderr, "%s.%s.%d: %s.\n", __FILE__, __FUNCTION__, __LINE__, cudaGetErrorString(err)); \
fflush(stderr); \
exit(1); \
} \
} \
typedef struct _MandelbrotInfo
{
int width, height, maxRows, maxIters;
float xMin, xMax, yMin, yMax;
} MandelbrotInfo;
typedef struct _CommPacket
{
int startRow, endRow;
} CommPacket;
void convertHSBtoRGB(const float & hue, const float & brightness, unsigned char * pixel)
{
unsigned char r, g, b;
const float saturation = 1.0f;
float h = (hue - floor(hue)) * 6.0f;
float f = h - floor(h);
float p = brightness * (1.0f - saturation);
float q = brightness * (1.0f - saturation * f);
float t = brightness * (1.0f - (saturation * (1.0f - f)));
switch (static_cast<int>(h))
{
case 0:
r = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
g = static_cast<unsigned char>(t * 255.0f + 0.5f);
b = static_cast<unsigned char>(p * 255.0f + 0.5f);
break;
case 1:
r = static_cast<unsigned char>(q * 255.0f + 0.5f);
g = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
b = static_cast<unsigned char>(p * 255.0f + 0.5f);
break;
case 2:
r = static_cast<unsigned char>(p * 255.0f + 0.5f);
g = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
b = static_cast<unsigned char>(t * 255.0f + 0.5f);
break;
case 3:
r = static_cast<unsigned char>(p * 255.0f + 0.5f);
g = static_cast<unsigned char>(q * 255.0f + 0.5f);
b = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
break;
case 4:
r = static_cast<unsigned char>(t * 255.0f + 0.5f);
g = static_cast<unsigned char>(p * 255.0f + 0.5f);
b = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
break;
case 5:
r = static_cast<unsigned char>(brightness * 255.0f + 0.5f);
g = static_cast<unsigned char>(p * 255.0f + 0.5f);
b = static_cast<unsigned char>(q * 255.0f + 0.5f);
break;
}
pixel[0] = r;
pixel[1] = g;
pixel[2] = b;
}
void readInputFile(const char * const input, MandelbrotInfo & minfo)
{
FILE * fp = fopen(input, "r");
char line[2048];
if (!fp)
{
fprintf(stderr, "Error, couldn't open file '%s' for reading.\n", input);
fflush(stderr);
exit(1);
}
while (fgets(line, 2047, fp))
{
char * ptr = line;
while (*ptr && *ptr <= ' ') ++ptr;
if (*ptr == '#') continue;
char * end = ptr + strlen(ptr) - 1;
while (end >= ptr && *end <= ' ') --end;
*(end + 1) = 0;
char var[1024];
int ival;
float fval;
sscanf(ptr, "%s = %d", var, &ival);
sscanf(ptr, "%s = %f", var, &fval);
if (strcmp(var, "width") == 0) minfo.width = ival;
else if (strcmp(var, "height") == 0) minfo.height = ival;
else if (strcmp(var, "maxRows") == 0) minfo.maxRows = ival;
else if (strcmp(var, "maxIters") == 0) minfo.maxIters = ival;
else if (strcmp(var, "xmin") == 0) minfo.xMin = fval;
else if (strcmp(var, "xmax") == 0) minfo.xMax = fval;
else if (strcmp(var, "ymin") == 0) minfo.yMin = fval;
else if (strcmp(var, "ymax") == 0) minfo.yMax = fval;
else
{
fprintf(stderr, "Warning, skipping invalid variable in input file (%s).\n", var);
fflush(stderr);
}
}
fclose(fp);
}
void storeRows(FILE * outfp, const int startOfImage, const int source, const int size,
const int startRow, const int endRow, const MandelbrotInfo & minfo,
const int * const pixels, unsigned char * const rgb)
{
const float hue = static_cast<float>(source) / static_cast<float>(size);
unsigned char * pixel = rgb;
for (int row = startRow; row < endRow; ++row)
{
const int * rowp = pixels + (row - startRow) * minfo.width;
for (int i = 0; i < minfo.width; ++i)
{
float t = 0.0f;
if (rowp[i] == 0)
{
t = 0.0f;
}
else if (rowp[i] < 16)
{
t = 0.75f * (static_cast<float>(rowp[i]) - 1.0f) / 14.0f;
}
else
{
t = 0.75f + 0.25f * (static_cast<float>(rowp[i]) - 16.0f) / static_cast<float>(minfo.maxIters - 16);
}
convertHSBtoRGB(hue, t, pixel);
pixel += 3;
}
}
fseek(outfp, startOfImage + sizeof(unsigned char) * minfo.width * startRow * 3, SEEK_SET);
fwrite(rgb, sizeof(unsigned char) * minfo.width * (endRow - startRow) * 3, 1, outfp);
}
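// Escape-time iteration for one image row: threads of a block stride over the
// columns and store the final iteration count of each pixel.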
__device__ void scanRow(const MandelbrotInfo minfo, const int row, int * pixels)
{
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter = 0;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
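// Software grid-wide barrier: each block raises its flag, block 0 waits for all
// flags and then clears them, releasing every block to continue.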
__device__ void __syncblocks(volatile int * syncblocksArr)
{
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
syncblocksArr[blockIdx.x] = 1;
if (blockIdx.x == 0)
{
for (int i = 1; i < gridDim.x; ++i)
{
while (syncblocksArr[i] == 0) { }
}
for (int i = 0; i < gridDim.x; ++i)
{
syncblocksArr[i] = 0;
}
}
while (syncblocksArr[blockIdx.x] == 1) { }
}
__syncthreads();
}
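// GPU worker kernel: thread 0 of block 0 exchanges work packets with the master
// over DCGN, all blocks render the assigned rows, and the loop ends when an
// empty packet (startRow >= endRow) is received.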
__global__ void doGPUSlave(int * pixels, MandelbrotInfo * pMinfo, CommPacket * packet, int * sbarr, const dcgn::GPUInitRequest libParam)
{
dcgn::gpu::init(libParam);
if (blockIdx.x == 0 && threadIdx.x == 0)
{
dcgn::gpu::broadcast(0, 0, pMinfo, sizeof(MandelbrotInfo));
}
__syncblocks(sbarr);
MandelbrotInfo minfo = *pMinfo;
packet->startRow = packet->endRow = -1;
if (blockIdx.x == 0 && threadIdx.x == 0) dcgn::gpu::barrier(0);
do
{
dcgn::CommStatus stat;
if (blockIdx.x == 0 && threadIdx.x == 0)
{
dcgn::gpu::send(0, 0, packet, sizeof(CommPacket));
if (packet->startRow < packet->endRow)
{
dcgn::gpu::send(0, 0, pixels, sizeof(int) * minfo.width * (packet->endRow - packet->startRow));
}
dcgn::gpu::recv(0, 0, packet, sizeof(CommPacket), &stat);
}
__syncblocks(sbarr); // wait for packet to arrive.
const int startRow = packet->startRow, endRow = packet->endRow;
int row = startRow + blockIdx.x;
while (row < endRow)
{
scanRow(minfo, row, pixels + minfo.width * (row - startRow));
row += gridDim.x;
}
__syncblocks(sbarr); // wait for work to finish, because when we loop we send the results back.
}
while (packet->startRow < packet->endRow);
if (blockIdx.x == 0 && threadIdx.x == 0) dcgn::gpu::barrier(0);
}
__host__ void doSlave(void * dbgInfo, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, cudaStream_t * const stream)
{
int * pixels, * sbarr;
MandelbrotInfo * minfo;
CommPacket * packet;
cudaMalloc(reinterpret_cast<void ** >(&sbarr), sizeof(int) * gridSize.x); CHECK_ERROR();
cudaMalloc(reinterpret_cast<void ** >(&packet), sizeof(CommPacket)); CHECK_ERROR();
cudaMalloc(reinterpret_cast<void ** >(&pixels), sizeof(int) * 1048576 * 100); CHECK_ERROR();
cudaMalloc(reinterpret_cast<void ** >(&minfo), sizeof(MandelbrotInfo)); CHECK_ERROR();
cudaMemset(sbarr, 0, sizeof(int) * gridSize.x); CHECK_ERROR();
cudaMemset(minfo, 0, sizeof(MandelbrotInfo)); CHECK_ERROR();
doGPUSlave<<<gridSize, blockSize, sharedMemSize, *stream>>>(pixels, minfo, packet, sbarr, libParam); CHECK_ERROR();
}
__host__ void gpuDtor(void * dbgInfo)
{
}
__host__ void doMaster(void * arg)
{
CommPacket packet;
int size, startOfImage, nextRow = 0;
MandelbrotInfo minfo;
char ** argv = reinterpret_cast<char ** >(arg);
readInputFile(argv[1], minfo);
FILE * outfp = fopen(argv[2], "wb");
if (!outfp)
{
fprintf(stderr, "Error, couldn't open %s for writing.\n", argv[2]);
fflush(stderr);
dcgn::abort(dcgn::DCGN_ERROR_ABORTED);
}
dcgn::broadcast(0, &minfo, sizeof(minfo));
size = dcgn::getSize();
unsigned char * rgb = new unsigned char[3 * minfo.width * minfo.maxRows];
int * pixels = new int[minfo.width * minfo.maxRows];
int numKilled = 1;
fprintf(outfp, "P6\n%d %d\n255\n%n", minfo.width, minfo.height, &startOfImage);
dcgn::barrier();
double timer = dcgn::wallTime();
int lastRow = -10000;
while (nextRow < minfo.height)
{
if (nextRow - lastRow >= 1000)
{
printf("%10d / %10d\r", nextRow, minfo.height); fflush(stdout);
lastRow = nextRow;
}
dcgn::CommStatus stat;
dcgn::recv(dcgn::ANY_SOURCE, &packet, sizeof(packet), &stat);
if (packet.endRow > packet.startRow)
{
dcgn::recv(stat.src, pixels, sizeof(int) * minfo.width * (packet.endRow - packet.startRow), &stat);
storeRows(outfp, startOfImage, stat.src - 1, size - 1, packet.startRow, packet.endRow, minfo, pixels, rgb);
}
packet.startRow = nextRow;
packet.endRow = std::min(packet.startRow + minfo.maxRows, minfo.height);
nextRow = packet.endRow;
dcgn::send(stat.src, &packet, sizeof(packet));
}
printf(" \r");
while (numKilled < size)
{
printf("%3d / %3d\r", numKilled, size); fflush(stdout);
dcgn::CommStatus stat;
dcgn::recv(dcgn::ANY_SOURCE, &packet, sizeof(packet), &stat);
if (packet.endRow > packet.startRow)
{
dcgn::recv(stat.src, pixels, sizeof(int) * minfo.width * (packet.endRow - packet.startRow), &stat);
storeRows(outfp, startOfImage, stat.src - 1, size - 1, packet.startRow, packet.endRow, minfo, pixels, rgb);
}
packet.startRow = packet.endRow = -1;
++numKilled;
dcgn::send(stat.src, &packet, sizeof(packet));
}
printf(" \r"); fflush(stdout);
dcgn::barrier();
timer = dcgn::wallTime() - timer;
fprintf(stderr, "done, took %f seconds.\n", timer);
fclose(outfp);
delete [] rgb;
delete [] pixels;
}
int main(int argc, char ** argv)
{
int gpus[] = { 0, 1, -1 };
uint3 gs = { 12, 1, 1 }, bs = { 160, 1, 1 };
dcgn::init(&argc, &argv);
dcgn::initComm(-1);
dcgn::initGPU(gpus, 1, 0);
dcgn::initCPU(dcgn::getNodeID() == 0 ? 1 : 0);
dcgn::start();
if (argc != 3)
{
if (dcgn::getNodeID() == 0)
{
fprintf(stderr, "Usage: %s <input_file> <output_file>\n", argv[0]);
fflush(stderr);
}
dcgn::finalize();
return 1;
}
void * gpuMem1, * gpuMem2;
if (dcgn::getNodeID() == 0) dcgn::launchCPUKernel(0, doMaster, argv);
dcgn::launchGPUKernel(0, doSlave, gpuDtor, &gpuMem1, gs, bs);
dcgn::launchGPUKernel(1, doSlave, gpuDtor, &gpuMem2, gs, bs);
dcgn::finalize();
return 0;
}
|
a286b3169530ef9373976619ffb4b12a49a3485d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
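// Forward pass: each output element max-pools its bin over the ROI projected onto
// the feature map; pad_ratio enlarges the ROI on all sides before pooling, and the
// argmax index of every bin is stored for the backward pass.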
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, bottom_data, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
// pad the current ROI before projecting it onto the feature map
Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, top_diff, argmax_data, num_rois, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| a286b3169530ef9373976619ffb4b12a49a3485d.cu | /*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
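// Forward pass: each output element max-pools its bin over the ROI projected onto
// the feature map; pad_ratio enlarges the ROI on all sides before pooling, and the
// argmax index of every bin is stored for the backward pass.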
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
ROIPoolForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, bottom_data, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
// pad the current ROI before projecting it onto the feature map
Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
ROIPoolBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, top_diff, argmax_data, num_rois, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
ee83ef41d9bfa3a89c3e92d5dd98389b5e3e7dab.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _RKDUMBPRETTY_
#define _RKDUMBPRETTY_
#include <hip/hip_runtime.h>
#include "globalVars.h"
#include "devFunctionProtos.h"
#include "derivs.cu"
#include "rk4.cu"
__global__ void rkdumbPretty(kernelParams_t params, devPtr_t devPtrs) {
double x1, *dev_spkTimes, *y, *synapticCurrent, *dev_time;
int nstep, *totNSpks, *dev_spkNeuronIds;
hiprandState_t *dev_state;
int k;
double x, isynapNew = 0, ibg = 0, iff = 0;
double v[N_STATEVARS], vout[N_STATEVARS], dv[N_STATEVARS], vmOld;
int localTotNspks = 0, localLastNSteps;
unsigned int mNeuron = threadIdx.x + blockDim.x * blockIdx.x;
x1 = params.tStart;
nstep = params.nSteps;
totNSpks = devPtrs.dev_nSpks;
y = devPtrs.dev_vm;
dev_time = devPtrs.dev_time;
synapticCurrent = devPtrs.synapticCurrent;
dev_state = devPtrs.devStates;
dev_spkTimes = devPtrs.dev_spkTimes;
dev_spkNeuronIds = devPtrs.dev_spkNeuronIds;
/* dev_nPostNeurons = devPtrs.dev_nPostNeurons;
dev_sparseConVec = devPtrs.dev_sparseConVec;
dev_sparseIdx = devPtrs.dev_sparseIdx;*/
k = devPtrs.k;
if(mNeuron < N_NEURONS) {
if(k == 0) {
dev_v[mNeuron] = (-1 * 70) + (40 * randkernel(dev_state)); /* Vm(0) ~ U(-70, -30)*/
dev_n[mNeuron] = 0.3176;
dev_z[mNeuron] = 0.1;
dev_h[mNeuron] = 0.5961;
dev_isynap[mNeuron] = 0;
dev_gE[mNeuron] = 0.0;
dev_gI[mNeuron] = 0.0;
if(mNeuron < NE) {
gaussNoiseE[mNeuron] = 0.0;
}
else {
gaussNoiseI[mNeuron - NE] = 0.0;
}
gFF[mNeuron] = 0.0;
rTotal[mNeuron] = 0.0;
gffItgrl[mNeuron] = 0.0;
}
localLastNSteps = nstep - STORE_LAST_N_STEPS;
/* TIMELOOP */
x = x1 + (double)k * DT;
dev_IF_SPK[mNeuron] = 0;
vmOld = dev_v[mNeuron];
v[0] = vmOld;
v[1] = dev_n[mNeuron];
v[2] = dev_z[mNeuron];
v[3] = dev_h[mNeuron];
isynapNew = dev_isynap[mNeuron];
iff = dev_iffCurrent[mNeuron];
ibg = bgCur(vmOld);
/* runge kutta 4 */
derivs(x, v, dv, isynapNew, ibg, iff);
rk4(v, dv, N_STATEVARS, x, DT, vout, isynapNew, ibg, iff);
x += DT;
/* UPDATE */
dev_v[mNeuron] = vout[0];
dev_n[mNeuron] = vout[1];
dev_z[mNeuron] = vout[2];
dev_h[mNeuron] = vout[3];
if(k >= localLastNSteps) {
y[mNeuron + N_NEURONS * (k - localLastNSteps)] = vout[0];
synapticCurrent[mNeuron + N_NEURONS * (k - localLastNSteps)] = isynapNew;
if(mNeuron == 0) {
dev_time[k - localLastNSteps] = x;
}
}
if(k > 2) {
if(vout[0] > SPK_THRESH) {
if(vmOld <= SPK_THRESH) {
dev_IF_SPK[mNeuron] = 1;
localTotNspks = atomicAdd(totNSpks, 1); /* atomic add on global introduces memory latency*/
if(localTotNspks + 1 < MAX_SPKS) {
dev_spkNeuronIds[localTotNspks + 1] = mNeuron;
dev_spkTimes[localTotNspks + 1] = x;
}
}
}
}
}
}
#endif
| ee83ef41d9bfa3a89c3e92d5dd98389b5e3e7dab.cu | #ifndef _RKDUMBPRETTY_
#define _RKDUMBPRETTY_
#include <cuda.h>
#include "globalVars.h"
#include "devFunctionProtos.h"
#include "derivs.cu"
#include "rk4.cu"
__global__ void rkdumbPretty(kernelParams_t params, devPtr_t devPtrs) {
double x1, *dev_spkTimes, *y, *synapticCurrent, *dev_time;
int nstep, *totNSpks, *dev_spkNeuronIds;
curandState *dev_state;
int k;
double x, isynapNew = 0, ibg = 0, iff = 0;
double v[N_STATEVARS], vout[N_STATEVARS], dv[N_STATEVARS], vmOld;
int localTotNspks = 0, localLastNSteps;
unsigned int mNeuron = threadIdx.x + blockDim.x * blockIdx.x;
x1 = params.tStart;
nstep = params.nSteps;
totNSpks = devPtrs.dev_nSpks;
y = devPtrs.dev_vm;
dev_time = devPtrs.dev_time;
synapticCurrent = devPtrs.synapticCurrent;
dev_state = devPtrs.devStates;
dev_spkTimes = devPtrs.dev_spkTimes;
dev_spkNeuronIds = devPtrs.dev_spkNeuronIds;
/* dev_nPostNeurons = devPtrs.dev_nPostNeurons;
dev_sparseConVec = devPtrs.dev_sparseConVec;
dev_sparseIdx = devPtrs.dev_sparseIdx;*/
k = devPtrs.k;
if(mNeuron < N_NEURONS) {
if(k == 0) {
dev_v[mNeuron] = (-1 * 70) + (40 * randkernel(dev_state)); /* Vm(0) ~ U(-70, -30)*/
dev_n[mNeuron] = 0.3176;
dev_z[mNeuron] = 0.1;
dev_h[mNeuron] = 0.5961;
dev_isynap[mNeuron] = 0;
dev_gE[mNeuron] = 0.0;
dev_gI[mNeuron] = 0.0;
if(mNeuron < NE) {
gaussNoiseE[mNeuron] = 0.0;
}
else {
gaussNoiseI[mNeuron - NE] = 0.0;
}
gFF[mNeuron] = 0.0;
rTotal[mNeuron] = 0.0;
gffItgrl[mNeuron] = 0.0;
}
localLastNSteps = nstep - STORE_LAST_N_STEPS;
/* TIMELOOP */
x = x1 + (double)k * DT;
dev_IF_SPK[mNeuron] = 0;
vmOld = dev_v[mNeuron];
v[0] = vmOld;
v[1] = dev_n[mNeuron];
v[2] = dev_z[mNeuron];
v[3] = dev_h[mNeuron];
isynapNew = dev_isynap[mNeuron];
iff = dev_iffCurrent[mNeuron];
ibg = bgCur(vmOld);
/* runge kutta 4 */
derivs(x, v, dv, isynapNew, ibg, iff);
rk4(v, dv, N_STATEVARS, x, DT, vout, isynapNew, ibg, iff);
x += DT;
/* UPDATE */
dev_v[mNeuron] = vout[0];
dev_n[mNeuron] = vout[1];
dev_z[mNeuron] = vout[2];
dev_h[mNeuron] = vout[3];
if(k >= localLastNSteps) {
y[mNeuron + N_NEURONS * (k - localLastNSteps)] = vout[0];
synapticCurrent[mNeuron + N_NEURONS * (k - localLastNSteps)] = isynapNew;
if(mNeuron == 0) {
dev_time[k - localLastNSteps] = x;
}
}
if(k > 2) {
if(vout[0] > SPK_THRESH) {
if(vmOld <= SPK_THRESH) {
dev_IF_SPK[mNeuron] = 1;
localTotNspks = atomicAdd(totNSpks, 1); /* atomic add on global introduces memory latency*/
if(localTotNspks + 1 < MAX_SPKS) {
dev_spkNeuronIds[localTotNspks + 1] = mNeuron;
dev_spkTimes[localTotNspks + 1] = x;
}
}
}
}
}
}
#endif
|