hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
092f56cae0d2df9fae75bd9d99396454eae945c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <omp.h>
#include <math.h>
#define BLOCK_SIZE 1024
// error checking wrapper (taken from stackexchange)
#define CUDA_Error_Check(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// threaded vec-dot
void vec_dot(double* c, const double* a, const double* b, long N)
{
double sum = 0;
#pragma omp parallel for reduction(+:sum)
for (long i = 0; i < N; i++) sum += a[i]*b[i];
*c = sum;
}
__global__ void vec_dot_kernel(double* c, const double* a, const double* b, long N)
{
__shared__ double smem[BLOCK_SIZE];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx] * b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
unsigned int s;
for (s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s)
{
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// last 32 threads belong to same warp
if (threadIdx.x < 32)
{
volatile double* s_ptr = smem;
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 32];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 16];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 8];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 4];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 2];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 1];
}
// write to global memory
if (threadIdx.x == 0) c[blockIdx.x] = smem[threadIdx.x];
}
__global__ void reduction(double* sum, const double* a, long N)
{
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s)
{
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// last 32 threads belong to same warp
if (threadIdx.x < 32)
{
volatile double* s_ptr = smem;
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 32];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 16];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 8];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 4];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 2];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 1];
}
// write to global memory
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main()
{
const long N = (1UL<<25);
// host copies and initialization
double *a, *b, c;
CUDA_Error_Check(hipHostMalloc(&a, N * sizeof(double)));
CUDA_Error_Check(hipHostMalloc(&b, N * sizeof(double)));
#pragma omp parallel for
for (long i = 0; i < N; ++i) a[i] = b[i] = drand48();
// get reference val and time
double c_ref; double tt = omp_get_wtime();
vec_dot(&c_ref, a, b, N);
printf("CPU Bandwidth = %f GB/s\n", N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
// device copies
double *a_d, *b_d, *c_d; long N_work = 1, Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
CUDA_Error_Check(hipMalloc(&a_d, N * sizeof(double)));
CUDA_Error_Check(hipMalloc(&b_d, N * sizeof(double)));
// extra memory buffer for reduction across thread-blocks
for (long i = Nb; i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
CUDA_Error_Check(hipMalloc(&c_d, N_work*sizeof(double)));
// copy host inputs to device
CUDA_Error_Check(hipMemcpyAsync(a_d, a, N * sizeof(double), hipMemcpyHostToDevice));
CUDA_Error_Check(hipMemcpyAsync(b_d, b, N * sizeof(double), hipMemcpyHostToDevice));
CUDA_Error_Check(hipDeviceSynchronize());
// call kernel recursively
tt = omp_get_wtime();
double* sum_d = c_d;
hipLaunchKernelGGL(( vec_dot_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, a_d, b_d, N);
while (Nb > 1)
{
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( reduction), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + N, sum_d, N);
sum_d += N;
}
// copy device result back to host
CUDA_Error_Check(hipMemcpyAsync(&c, sum_d, sizeof(double), hipMemcpyDeviceToHost));
CUDA_Error_Check(hipDeviceSynchronize());
printf("GPU Bandwidth = %f GB/s\n", N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("Error = %lf\n", fabs(c-c_ref));
// free mem
CUDA_Error_Check(hipFree(a_d));
CUDA_Error_Check(hipFree(b_d));
CUDA_Error_Check(hipFree(c_d));
CUDA_Error_Check(hipHostFree(a));
CUDA_Error_Check(hipHostFree(b));
return 0;
}
| 092f56cae0d2df9fae75bd9d99396454eae945c8.cu | #include <stdio.h>
#include <omp.h>
#include <math.h>
#define BLOCK_SIZE 1024
// error checking wrapper (taken from stackexchange)
#define CUDA_Error_Check(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// threaded vec-dot
void vec_dot(double* c, const double* a, const double* b, long N)
{
double sum = 0;
#pragma omp parallel for reduction(+:sum)
for (long i = 0; i < N; i++) sum += a[i]*b[i];
*c = sum;
}
__global__ void vec_dot_kernel(double* c, const double* a, const double* b, long N)
{
__shared__ double smem[BLOCK_SIZE];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx] * b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
unsigned int s;
for (s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s)
{
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// last 32 threads belong to same warp
if (threadIdx.x < 32)
{
volatile double* s_ptr = smem;
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 32];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 16];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 8];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 4];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 2];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 1];
}
// write to global memory
if (threadIdx.x == 0) c[blockIdx.x] = smem[threadIdx.x];
}
__global__ void reduction(double* sum, const double* a, long N)
{
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s)
{
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// last 32 threads belong to same warp
if (threadIdx.x < 32)
{
volatile double* s_ptr = smem;
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 32];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 16];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 8];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 4];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 2];
s_ptr[threadIdx.x] += s_ptr[threadIdx.x + 1];
}
// write to global memory
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main()
{
const long N = (1UL<<25);
// host copies and initialization
double *a, *b, c;
CUDA_Error_Check(cudaMallocHost(&a, N * sizeof(double)));
CUDA_Error_Check(cudaMallocHost(&b, N * sizeof(double)));
#pragma omp parallel for
for (long i = 0; i < N; ++i) a[i] = b[i] = drand48();
// get reference val and time
double c_ref; double tt = omp_get_wtime();
vec_dot(&c_ref, a, b, N);
printf("CPU Bandwidth = %f GB/s\n", N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
// device copies
double *a_d, *b_d, *c_d; long N_work = 1, Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
CUDA_Error_Check(cudaMalloc(&a_d, N * sizeof(double)));
CUDA_Error_Check(cudaMalloc(&b_d, N * sizeof(double)));
// extra memory buffer for reduction across thread-blocks
for (long i = Nb; i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
CUDA_Error_Check(cudaMalloc(&c_d, N_work*sizeof(double)));
// copy host inputs to device
CUDA_Error_Check(cudaMemcpyAsync(a_d, a, N * sizeof(double), cudaMemcpyHostToDevice));
CUDA_Error_Check(cudaMemcpyAsync(b_d, b, N * sizeof(double), cudaMemcpyHostToDevice));
CUDA_Error_Check(cudaDeviceSynchronize());
// call kernel recursively
tt = omp_get_wtime();
double* sum_d = c_d;
vec_dot_kernel<<<Nb,BLOCK_SIZE>>>(sum_d, a_d, b_d, N);
while (Nb > 1)
{
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
reduction<<<Nb,BLOCK_SIZE>>>(sum_d + N, sum_d, N);
sum_d += N;
}
// copy device result back to host
CUDA_Error_Check(cudaMemcpyAsync(&c, sum_d, sizeof(double), cudaMemcpyDeviceToHost));
CUDA_Error_Check(cudaDeviceSynchronize());
printf("GPU Bandwidth = %f GB/s\n", N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("Error = %lf\n", fabs(c-c_ref));
// free mem
CUDA_Error_Check(cudaFree(a_d));
CUDA_Error_Check(cudaFree(b_d));
CUDA_Error_Check(cudaFree(c_d));
CUDA_Error_Check(cudaFreeHost(a));
CUDA_Error_Check(cudaFreeHost(b));
return 0;
}
|
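The pair above shows that hipify leaves the kernel bodies, OpenMP reference code, and reduction logic untouched and mainly renames `cuda*` APIs to `hip*` and rewrites the launch syntax. A minimal sketch of that launch mapping, using a hypothetical `demo` kernel that is not part of the dataset:

```cpp
#include <hip/hip_runtime.h>

// Hypothetical kernel, only to illustrate the launch-syntax translation.
__global__ void demo(double* out, long n) {
    long i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0 * i;
}

void launch(double* d_out, long n) {
    dim3 blocks((n + 255) / 256), threads(256);
    // CUDA source would read:  demo<<<blocks, threads>>>(d_out, n);
    // hipify rewrites it to the portable macro form used throughout this dataset:
    hipLaunchKernelGGL(demo, blocks, threads, 0 /*shared mem*/, 0 /*stream*/, d_out, n);
}
```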
1231d8f9ba173533a3de74caa9d4380f9d9f89ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addValue_i32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *vector = NULL;
hipMalloc(&vector, XSIZE*YSIZE);
int value = 2;
int *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(addValue_i32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, output, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(addValue_i32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, output, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(addValue_i32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, value, output, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1231d8f9ba173533a3de74caa9d4380f9d9f89ae.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addValue_i32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *vector = NULL;
cudaMalloc(&vector, XSIZE*YSIZE);
int value = 2;
int *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
addValue_i32<<<gridBlock,threadBlock>>>(vector,value,output,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addValue_i32<<<gridBlock,threadBlock>>>(vector,value,output,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addValue_i32<<<gridBlock,threadBlock>>>(vector,value,output,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
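Both versions in this row round `XSIZE`/`YSIZE` up to multiples of `BLOCKX`/`BLOCKY` with a pair of while loops before computing the grid. A hedged, behaviour-equivalent sketch using integer ceiling division (the variable names are reused from the row purely for illustration):

```cpp
#include <hip/hip_runtime.h>

// Same gridBlock as the while-loop rounding above, assuming BLOCKX, BLOCKY > 0.
static dim3 makeGrid(int XSIZE, int YSIZE, int BLOCKX, int BLOCKY) {
    return dim3((XSIZE + BLOCKX - 1) / BLOCKX,
                (YSIZE + BLOCKY - 1) / BLOCKY);
}
```

Note also that neither version synchronizes before taking `end`, so the reported microseconds largely measure kernel-launch overhead rather than kernel execution time.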
5d09e74c1a92d6f58536c974c02e3928d40d0c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
#define N 1025
#define M 12
__device__ int foo(int row, int col)
{
return (2 * row);
}
__global__ void kernel(int **arr)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
for ( ; tid < N; tid++)
{
for (i = 0; i < M; i++)
{
arr[tid][i] = foo(tid, i);
}
}
}
int main(int argc, char **argv)
{
int i;
int **h_matrix;
int **d_ptrs;
int **d_matrix;
h_matrix = (int **)malloc(N * sizeof(int *));
d_ptrs = (int **)malloc(N * sizeof(int *));
CHECK(hipMalloc((void **)&d_matrix, N * sizeof(int *)));
CHECK(hipMemset(d_matrix, 0x00, N * sizeof(int *)));
for (i = 0; i < N; i++)
{
h_matrix[i] = (int *)malloc(M * sizeof(int));
CHECK(hipMalloc((void **)&d_ptrs[i], M * sizeof(int)));
CHECK(hipMemset(d_ptrs[i], 0x00, M * sizeof(int)));
}
int threadsPerBlock = 256;
int blocksPerGrid = 1024;
hipLaunchKernelGGL(( kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_matrix);
for (i = 0; i < N; i++)
{
CHECK(hipMemcpy(h_matrix[i], d_ptrs[i], M * sizeof(int),
hipMemcpyDeviceToHost));
CHECK(hipFree(d_ptrs[i]));
free(h_matrix[i]);
}
CHECK(hipFree(d_matrix));
free(h_matrix);
return 0;
}
| 5d09e74c1a92d6f58536c974c02e3928d40d0c68.cu | #include "../common/common.h"
#include <stdio.h>
#define N 1025
#define M 12
__device__ int foo(int row, int col)
{
return (2 * row);
}
__global__ void kernel(int **arr)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
for ( ; tid < N; tid++)
{
for (i = 0; i < M; i++)
{
arr[tid][i] = foo(tid, i);
}
}
}
int main(int argc, char **argv)
{
int i;
int **h_matrix;
int **d_ptrs;
int **d_matrix;
h_matrix = (int **)malloc(N * sizeof(int *));
d_ptrs = (int **)malloc(N * sizeof(int *));
CHECK(cudaMalloc((void **)&d_matrix, N * sizeof(int *)));
CHECK(cudaMemset(d_matrix, 0x00, N * sizeof(int *)));
for (i = 0; i < N; i++)
{
h_matrix[i] = (int *)malloc(M * sizeof(int));
CHECK(cudaMalloc((void **)&d_ptrs[i], M * sizeof(int)));
CHECK(cudaMemset(d_ptrs[i], 0x00, M * sizeof(int)));
}
int threadsPerBlock = 256;
int blocksPerGrid = 1024;
kernel<<<blocksPerGrid, threadsPerBlock>>>(d_matrix);
for (i = 0; i < N; i++)
{
CHECK(cudaMemcpy(h_matrix[i], d_ptrs[i], M * sizeof(int),
cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_ptrs[i]));
free(h_matrix[i]);
}
CHECK(cudaFree(d_matrix));
free(h_matrix);
return 0;
}
|
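Neither version in this row copies the per-row device pointers stored in `d_ptrs` into the device array `d_matrix`, so the kernel dereferences the zero-initialized pointer array; the sample may be intentionally broken as a debugging exercise, so the following is illustrative only. A hedged sketch of the missing copy, reusing the row's variable names:

```cpp
// Publish the N row pointers to the device before launching the kernel
// (CUDA spelling; the HIP version would use hipMemcpy with hipMemcpyHostToDevice).
CHECK(cudaMemcpy(d_matrix, d_ptrs, N * sizeof(int *), cudaMemcpyHostToDevice));
```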
4213a1325dbe8b0ecfc36ded51dbb47429b64cee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy.cu, normal z -> s, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
// To deal with really large matrices, this launchs multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slacpy_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_lower_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_lower_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slacpy_full_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_lower_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_upper_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void slacpy_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_full_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
__global__
void slacpy_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_lower_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
__global__
void slacpy_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_upper_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
/***************************************************************************//**
Purpose
-------
SLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_slacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use hipMemcpy or hipMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), ibatch );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( slacpy_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray+i, ldda, dBarray+i, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( slacpy_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray+i, ldda, dBarray+i, lddb );
}
else {
hipLaunchKernelGGL(( slacpy_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray+i, ldda, dBarray+i, lddb );
}
}
}
/***************************************************************************//**
Purpose
-------
SLACPY_VBATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Matrices are assumed to generally have different sizes/leading dimensions
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
max_m INTEGER.
maximum value of m.
@param[in]
max_n INTEGER.
maximum value of n.
@param[in]
m INTEGER array, dimension (batchCount).
Each is the number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER array, dimension (batchCount).
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray Array of pointers , dimension (batchCount)
Each is a REAL array dA, where the ith matrix dA is of dimension (ldda[i],n[i]).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER array, dimension (batchCount).
Each is the leading dimension of each array dA. For the ith matrix dA ldda[i] >= max(1,m[i]).
@param[out]
dBarray Array of pointers, dimension(batchCount).
Each is a REAL array dB, where the ith matrix dB is of dimension (lddb[i],n[i]).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER array, dimension (batchCount).
Each is the leading dimension of each array dB. For the ith matrix dB lddb[i] >= max(1,m[i]).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_slacpy_vbatched(
magma_uplo_t uplo,
magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
float const * const * dAarray, magma_int_t* ldda,
float** dBarray, magma_int_t* lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
//else if ( m < 0 )
// info = -2;
//else if ( n < 0 )
// info = -3;
//else if ( ldda < max(1,m))
// info = -5;
//else if ( lddb < max(1,m))
// info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( max_m == 0 || max_n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), ibatch );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( slacpy_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( slacpy_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
else {
hipLaunchKernelGGL(( slacpy_full_kernel_vbatched) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
}
}
| 4213a1325dbe8b0ecfc36ded51dbb47429b64cee.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy.cu, normal z -> s, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
// To deal with really large matrices, this launchs multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slacpy_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_lower_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_lower_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slacpy_full_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_lower_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_upper_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void slacpy_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_full_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
__global__
void slacpy_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_lower_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
__global__
void slacpy_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float const * const *dAarray, magma_int_t* ldda,
float **dBarray, magma_int_t* lddb )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if(blockIdx.x >= magma_ceildiv(my_m, BLK_X)) return;
if(blockIdx.y >= magma_ceildiv(my_n, BLK_Y)) return;
slacpy_upper_device(my_m, my_n, dAarray[batchid], (int)ldda[batchid], dBarray[batchid], (int)lddb[batchid]);
}
/***************************************************************************//**
Purpose
-------
SLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_slacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slacpy_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
slacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slacpy_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
slacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use cudaMemcpy or cudaMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
slacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), ibatch );
if ( uplo == MagmaLower ) {
slacpy_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray+i, ldda, dBarray+i, lddb );
}
else if ( uplo == MagmaUpper ) {
slacpy_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray+i, ldda, dBarray+i, lddb );
}
else {
slacpy_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray+i, ldda, dBarray+i, lddb );
}
}
}
/***************************************************************************//**
Purpose
-------
SLACPY_VBATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Matrices are assumed to generally have different sizes/leading dimensions
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
max_m INTEGER.
maximum value of m.
@param[in]
max_n INTEGER.
maximum value of n.
@param[in]
m INTEGER array, dimension (batchCount).
Each is the number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER array, dimension (batchCount).
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray Array of pointers , dimension (batchCount)
Each is a REAL array dA, where the ith matrix dA is of dimension (ldda[i],n[i]).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER array, dimension (batchCount).
Each is the leading dimension of each array dA. For the ith matrix dA ldda[i] >= max(1,m[i]).
@param[out]
dBarray Array of pointers, dimension(batchCount).
Each is a REAL array dB, where the ith matrix dB is of dimension (lddb[i],n[i]).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER array, dimension (batchCount).
Each is the leading dimension of each array dB. For the ith matrix dB lddb[i] >= max(1,m[i]).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy_batched
*******************************************************************************/
extern "C" void
magmablas_slacpy_vbatched(
magma_uplo_t uplo,
magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
float const * const * dAarray, magma_int_t* ldda,
float** dBarray, magma_int_t* lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
//else if ( m < 0 )
// info = -2;
//else if ( n < 0 )
// info = -3;
//else if ( ldda < max(1,m))
// info = -5;
//else if ( lddb < max(1,m))
// info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( max_m == 0 || max_n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), ibatch );
if ( uplo == MagmaLower ) {
slacpy_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>
( m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
else if ( uplo == MagmaUpper ) {
slacpy_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>
( m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
else {
slacpy_full_kernel_vbatched <<< grid, threads, 0, queue->cuda_stream() >>>
( m+i, n+i, dAarray+i, ldda+i, dBarray+i, lddb+i );
}
}
}
|
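The `magmablas_slacpy` wrapper documented in this row is host-callable. A hedged usage sketch based only on the signature and argument documentation shown above; the init, queue, and allocation helpers (`magma_init`, `magma_queue_create`, `magma_smalloc`, ...) are assumptions drawn from standard MAGMA usage, not from this row:

```cpp
#include "magma_v2.h"

// Hedged sketch: copy an m x n device matrix dA into dB with magmablas_slacpy.
int copy_full(magma_int_t m, magma_int_t n)
{
    magma_init();
    magma_queue_t queue;
    magma_queue_create(0, &queue);        // queue on device 0 (assumed helper)

    magma_int_t ldda = m, lddb = m;
    float *dA = NULL, *dB = NULL;
    magma_smalloc(&dA, ldda * n);         // device buffers (assumed helper)
    magma_smalloc(&dB, lddb * n);
    // ... fill dA on the device ...

    magmablas_slacpy(MagmaFull, m, n, dA, ldda, dB, lddb, queue);  // dB = dA
    magma_queue_sync(queue);

    magma_free(dA);
    magma_free(dB);
    magma_queue_destroy(queue);
    magma_finalize();
    return 0;
}
```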
1588db3c66ccec38a73424d8d716dd30600be42a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include <glm/gtc/matrix_inverse.hpp>
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "stream_compaction/common.h"
#include "stream_compaction/efficient.h"
#define ERRORCHECK 1
// Toggle features
#define PATH_COMPACT 1
#define CACHE_FIRST_BOUNCE 1
#define SORT_BY_MATERIAL 0
#define STOCHASTIC_ANTIALIASING 0
#define DEPTH_OF_FIELD 0
#define MOTION_BLUR 0
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static ShadeableIntersection * dev_intersections_firstbounce = NULL;
static int * dev_materials_paths = NULL;
static int * dev_materials_intersections = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
hipMalloc(&dev_intersections_firstbounce, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections_firstbounce, 0, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_materials_paths, pixelcount * sizeof(int));
hipMalloc(&dev_materials_intersections, pixelcount * sizeof(int));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_intersections_firstbounce);
hipFree(dev_materials_paths);
hipFree(dev_materials_intersections);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// Stochastic sampled antialiasing implemented by jittering the ray
#if STOCHASTIC_ANTIALIASING
thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#else
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#endif
// Depth of Field
#if DEPTH_OF_FIELD
thrust::default_random_engine rngx = makeSeededRandomEngine(iter, x, 0);
thrust::default_random_engine rngy = makeSeededRandomEngine(iter, y, 0);
thrust::uniform_real_distribution<float> udof(-1.0, 1.0);
float lensU; float lensV;
lensU = (udof(rngx)) / (80.0f);
lensV = (udof(rngy)) / (80.0f);
float t = 2.0f;
glm::vec3 focus = t * segment.ray.direction + segment.ray.origin;
segment.ray.origin += cam.right * lensU + cam.up * lensV;
segment.ray.direction = glm::normalize(focus - segment.ray.origin);
#endif
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].point = intersect_point;
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, glm::vec3* img
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
PathSegment& path_segment = pathSegments[idx]; // reference taken only after the bounds check
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
path_segment.color *= (materialColor * material.emittance);
path_segment.remainingBounces = 0;
}
else {
scatterRay(path_segment, intersection.point, intersection.surfaceNormal, material, rng);
path_segment.remainingBounces--;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
path_segment.color = glm::vec3(0.0f);
path_segment.remainingBounces = 0;
}
if (path_segment.isDone()) {
img[path_segment.pixelIndex] += path_segment.color;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
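// Predicate for thrust::remove_if: true for paths with no bounces left, so they get compacted away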
struct isPathInactive {
__host__ __device__ bool operator() (const PathSegment& path_segment) {
return (path_segment.remainingBounces <= 0);
}
};
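// Writes each intersection's material ID into a key buffer so paths and intersections can be sorted by material (SORT_BY_MATERIAL)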
__global__ void kernGetMaterialId(int n, int *odata, const ShadeableIntersection *intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
odata[index] = intersections[index].materialId;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
//timer().startGpuTimer();
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
const Geom *hst_scene_geoms = &(hst_scene->geoms)[0];
// Jitter a separate copy of the geometry so the host scene is not permanently modified across iterations
std::vector<Geom> motionBlurGeomsCopy(hst_scene->geoms);
Geom *motionBlurGeoms = motionBlurGeomsCopy.data();
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
#if MOTION_BLUR
thrust::default_random_engine rng = makeSeededRandomEngine(iter, hst_scene->geoms.size(), traceDepth);
thrust::uniform_real_distribution<float> umotion(0, TWO_PI);
for (int i = 0; i < hst_scene->geoms.size(); i++) {
Geom& currGeom = motionBlurGeoms[i];
currGeom = hst_scene_geoms[i];
currGeom.translation.x += hst_scene_geoms[i].motion.x * 0.06 * cos(umotion(rng));
currGeom.translation.y += hst_scene_geoms[i].motion.y * 0.06 * cos(umotion(rng));
currGeom.translation.z += hst_scene_geoms[i].motion.z * 0.06 * cos(umotion(rng));
// calculate transforms of geometry
currGeom.transform = utilityCore::buildTransformationMatrix(
currGeom.translation, currGeom.rotation, currGeom.scale);
currGeom.inverseTransform = glm::inverse(currGeom.transform);
currGeom.invTranspose = glm::inverseTranspose(currGeom.transform);
}
hipMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
#endif
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_paths_active = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths_active + blockSize1d - 1) / blockSize1d;
if (!CACHE_FIRST_BOUNCE || (CACHE_FIRST_BOUNCE && ((depth > 0) || (depth == 0 && iter == 1)))) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
}
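// Cache the depth-0 intersections on the first iteration and reuse them in later iterations; primary rays only repeat exactly when antialiasing/depth-of-field jitter is disabled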
#if CACHE_FIRST_BOUNCE
if (depth == 0 && iter == 1) {
hipMemcpy(dev_intersections_firstbounce, dev_intersections, num_paths_active * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
// re-use first bounce across all subsequent iterations
else if (depth == 0 && iter > 1) {
hipMemcpy(dev_intersections, dev_intersections_firstbounce, num_paths_active * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
#endif
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
#if SORT_BY_MATERIAL
kernGetMaterialId << <numblocksPathSegmentTracing, blockSize1d >> >(num_paths_active, dev_materials_paths, dev_intersections);
checkCUDAError("kernGetMaterialType failed");
hipMemcpy(dev_materials_intersections, dev_materials_paths, num_paths_active * sizeof(int), hipMemcpyDeviceToDevice);
thrust::sort_by_key(thrust::device, dev_materials_paths, dev_materials_paths + num_paths_active, dev_paths);
thrust::sort_by_key(thrust::device, dev_materials_intersections, dev_materials_intersections + num_paths_active, dev_intersections);
#endif
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths_active,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_image
);
#if PATH_COMPACT
// Thrust compact
thrust::device_ptr<PathSegment> thrust_dev_paths_ptr(dev_paths);
auto thrust_end = thrust::remove_if(thrust::device, thrust_dev_paths_ptr, thrust_dev_paths_ptr + num_paths_active, isPathInactive());
num_paths_active = thrust_end - thrust_dev_paths_ptr;
#endif
depth++;
iterationComplete = num_paths_active <= 0 || traceDepth < depth; // terminate when all paths are compacted away or the maximum trace depth is reached
}
/*timer().endGpuTimer();
if (iter < 101 && (iter % 10 == 0 || iter == 1)) {
cout << timer().getGpuElapsedTimeForPreviousOperation() << endl;
}*/
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 1588db3c66ccec38a73424d8d716dd30600be42a.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include <glm/gtc/matrix_inverse.hpp>
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "stream_compaction/common.h"
#include "stream_compaction/efficient.h"
#define ERRORCHECK 1
// Toggle features
#define PATH_COMPACT 1
#define CACHE_FIRST_BOUNCE 1
#define SORT_BY_MATERIAL 0
#define STOCHASTIC_ANTIALIASING 0
#define DEPTH_OF_FIELD 0
#define MOTION_BLUR 0
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static ShadeableIntersection * dev_intersections_firstbounce = NULL;
static int * dev_materials_paths = NULL;
static int * dev_materials_intersections = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_intersections_firstbounce, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections_firstbounce, 0, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_materials_paths, pixelcount * sizeof(int));
cudaMalloc(&dev_materials_intersections, pixelcount * sizeof(int));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_intersections_firstbounce);
cudaFree(dev_materials_paths);
cudaFree(dev_materials_intersections);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// Stochastic sampled antialiasing implemented by jittering the ray
#if STOCHASTIC_ANTIALIASING
thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#else
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#endif
// Depth of Field
#if DEPTH_OF_FIELD
thrust::default_random_engine rngx = makeSeededRandomEngine(iter, x, 0);
thrust::default_random_engine rngy = makeSeededRandomEngine(iter, y, 0);
thrust::uniform_real_distribution<float> udof(-1.0, 1.0);
float lensU; float lensV;
lensU = (udof(rngx)) / (80.0f);
lensV = (udof(rngy)) / (80.0f);
float t = 2.0f;
glm::vec3 focus = t * segment.ray.direction + segment.ray.origin;
segment.ray.origin += cam.right * lensU + cam.up * lensV;
segment.ray.direction = glm::normalize(focus - segment.ray.origin);
#endif
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].point = intersect_point;
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, glm::vec3* img
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
PathSegment& path_segment = pathSegments[idx]; // reference taken only after the bounds check
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
path_segment.color *= (materialColor * material.emittance);
path_segment.remainingBounces = 0;
}
else {
scatterRay(path_segment, intersection.point, intersection.surfaceNormal, material, rng);
path_segment.remainingBounces--;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
path_segment.color = glm::vec3(0.0f);
path_segment.remainingBounces = 0;
}
if (path_segment.isDone()) {
img[path_segment.pixelIndex] += path_segment.color;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
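// Predicate for thrust::remove_if: true for paths with no bounces left, so they get compacted away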
struct isPathInactive {
__host__ __device__ bool operator() (const PathSegment& path_segment) {
return (path_segment.remainingBounces <= 0);
}
};
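// Writes each intersection's material ID into a key buffer so paths and intersections can be sorted by material (SORT_BY_MATERIAL)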
__global__ void kernGetMaterialId(int n, int *odata, const ShadeableIntersection *intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
odata[index] = intersections[index].materialId;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
//timer().startGpuTimer();
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
const Geom *hst_scene_geoms = &(hst_scene->geoms)[0];
// Jitter a separate copy of the geometry so the host scene is not permanently modified across iterations
std::vector<Geom> motionBlurGeomsCopy(hst_scene->geoms);
Geom *motionBlurGeoms = motionBlurGeomsCopy.data();
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
#if MOTION_BLUR
thrust::default_random_engine rng = makeSeededRandomEngine(iter, hst_scene->geoms.size(), traceDepth);
thrust::uniform_real_distribution<float> umotion(0, TWO_PI);
for (int i = 0; i < hst_scene->geoms.size(); i++) {
Geom& currGeom = motionBlurGeoms[i];
currGeom = hst_scene_geoms[i];
currGeom.translation.x += hst_scene_geoms[i].motion.x * 0.06 * cos(umotion(rng));
currGeom.translation.y += hst_scene_geoms[i].motion.y * 0.06 * cos(umotion(rng));
currGeom.translation.z += hst_scene_geoms[i].motion.z * 0.06 * cos(umotion(rng));
// calculate transforms of geometry
currGeom.transform = utilityCore::buildTransformationMatrix(
currGeom.translation, currGeom.rotation, currGeom.scale);
currGeom.inverseTransform = glm::inverse(currGeom.transform);
currGeom.invTranspose = glm::inverseTranspose(currGeom.transform);
}
cudaMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
#endif
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_paths_active = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths_active + blockSize1d - 1) / blockSize1d;
if (!CACHE_FIRST_BOUNCE || (CACHE_FIRST_BOUNCE && ((depth > 0) || (depth == 0 && iter == 1)))) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
}
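// Cache the depth-0 intersections on the first iteration and reuse them in later iterations; primary rays only repeat exactly when antialiasing/depth-of-field jitter is disabled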
#if CACHE_FIRST_BOUNCE
if (depth == 0 && iter == 1) {
cudaMemcpy(dev_intersections_firstbounce, dev_intersections, num_paths_active * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
// re-use first bounce across all subsequent iterations
else if (depth == 0 && iter > 1) {
cudaMemcpy(dev_intersections, dev_intersections_firstbounce, num_paths_active * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
#endif
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
#if SORT_BY_MATERIAL
kernGetMaterialId << <numblocksPathSegmentTracing, blockSize1d >> >(num_paths_active, dev_materials_paths, dev_intersections);
checkCUDAError("kernGetMaterialType failed");
cudaMemcpy(dev_materials_intersections, dev_materials_paths, num_paths_active * sizeof(int), cudaMemcpyDeviceToDevice);
thrust::sort_by_key(thrust::device, dev_materials_paths, dev_materials_paths + num_paths_active, dev_paths);
thrust::sort_by_key(thrust::device, dev_materials_intersections, dev_materials_intersections + num_paths_active, dev_intersections);
#endif
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths_active,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_image
);
#if PATH_COMPACT
// Thrust compact
thrust::device_ptr<PathSegment> thrust_dev_paths_ptr(dev_paths);
auto thrust_end = thrust::remove_if(thrust::device, thrust_dev_paths_ptr, thrust_dev_paths_ptr + num_paths_active, isPathInactive());
num_paths_active = thrust_end - thrust_dev_paths_ptr;
#endif
depth++;
iterationComplete = num_paths_active <= 0 || traceDepth < depth; // terminate when all paths are compacted away or the maximum trace depth is reached
}
/*timer().endGpuTimer();
if (iter < 101 && (iter % 10 == 0 || iter == 1)) {
cout << timer().getGpuElapsedTimeForPreviousOperation() << endl;
}*/
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
0384b345b8048027e1f025f76d503f16a2b2b8cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "../compat.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
extern "C"{
#include "../graph500.h"
}
#include "../xalloc.h"
#include "../generator/graph_generator.h"
#ifndef NWARPS
// Set by Makefile
#define NWARPS 8
#endif
//#define CHUNK_ELEM 4096
//#define CHUNK_SIZE (sizeof(int32_t)*CHUNK_ELEM) // Size of a chunk in byte
#define BITMAP_TYPE uint32_t
#define BITMAP_WORD 32
/* Global variables */
static int64_t maxvtx; // total number of vertices
static int64_t nv; // number of vertices
static int64_t maxedg;
static int32_t nwords;
/* Host pointers */
static int32_t * h_CSR_R;
static int32_t * h_CSR_C;
static int32_t * h_predecessors;
static int32_t h_n_in_queue;
static int32_t h_n_out_queue;
/* Device pointers */
static int32_t * d_CSR_R;
static int32_t * d_CSR_C;
static BITMAP_TYPE * d_in_queue;
static BITMAP_TYPE * d_out_queue;
static int32_t * d_predecessors;
__managed__ static int32_t d_n_in_queue;
__managed__ static int32_t d_n_out_queue;
static BITMAP_TYPE * d_visited_tex;
__constant__ int32_t d_nwords;
texture<BITMAP_TYPE, 1, hipReadModeElementType> tex_visited;
texture<BITMAP_TYPE, 1, hipReadModeElementType> tex_in_queue;
static hipEvent_t start, stop;
static void HandleError(hipError_t err,
const char *file,
int line)
{
if(err != hipSuccess)
{
printf("%s in %s at line %d \n",hipGetErrorString(err),file,line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError(err, __FILE__, __LINE__))
/* "Private functions" */
/* Compute the total number of vertices in the generated graph */
static void find_nv(const struct packed_edge * restrict IJ, const int64_t nedge)
{
maxvtx = -1;
// Here use the 40 cores to compute
#pragma omp parallel
{
int64_t k;
#pragma omp for reduction(max:maxvtx)
for(k = 0 ; k < nedge ; ++k)
{
if(get_v0_from_edge(&IJ[k]) > maxvtx)
maxvtx = get_v0_from_edge(&IJ[k]);
if(get_v1_from_edge(&IJ[k]) > maxvtx)
maxvtx = get_v1_from_edge(&IJ[k]);
}
}
nv = maxvtx+1;
}
void
omp_prefix_sum(int32_t * x, int N)
{
int32_t * suma;
#pragma omp parallel
{
const int ithread = omp_get_thread_num();
const int nthreads = omp_get_num_threads();
#pragma omp single
{
suma = (int32_t*)malloc(sizeof(int32_t)*(nthreads+1));
suma[0] = 0;
}
int32_t sum = 0;
#pragma omp for schedule(static)
for(unsigned int i = 0 ; i < N ; ++i)
{
sum += x[i];
x[i] = sum;
}
suma[ithread+1] = sum;
#pragma omp barrier
int32_t offset = 0;
for(unsigned int i = 0 ; i < (ithread+1) ; ++i)
offset += suma[i];
#pragma omp for schedule(static)
for(unsigned int i = 0 ; i < N ; ++i)
x[i] += offset;
}
for(unsigned int i = N ; i > 0 ; --i)
x[i] = x[i-1];
x[0] = 0;
free(suma);
}
static void
edgelist_to_CSR(const struct packed_edge * restrict IJ, const int64_t nedge)
{
//int32_t *h_chunk_v0, *h_chunk_v1;
//int32_t *d_chunk_v0, *d_chunk_v1;
//int nchunk = (2*nedge*sizeof(int32_t))/CHUNK_SIZE;
//printf("MAXVTX(%" PRId64 ")\n",maxvtx);
//printf("NV(%" PRId64 ")\n",nv);
hipSetDevice(0);
/* Init CSR arrays on GPU */
HANDLE_ERROR(hipMallocManaged((void**)&d_CSR_R,sizeof(int32_t)*(nv+1)));
h_CSR_R = d_CSR_R;
//h_CSR_R = (int32_t*)malloc(sizeof(int32_t)*(nv+1));
assert(h_CSR_R);
HANDLE_ERROR(hipMemAdvise(d_CSR_R, sizeof(int32_t)*(nv+1), hipMemAdviseSetPreferredLocation, 0));
memset(h_CSR_R,0,sizeof(int32_t)*(nv+1));
/* Step one, count the CSR_R and CSR_C size */
maxedg = 0;
#pragma omp parallel for reduction(+:maxedg)
for(unsigned int i = 0 ; i < nedge ; ++i)
{
// No self loop
if(get_v0_from_edge(&IJ[i]) != get_v1_from_edge(&IJ[i]))
{
__sync_fetch_and_add(&h_CSR_R[get_v0_from_edge(&IJ[i])],1);
__sync_fetch_and_add(&h_CSR_R[get_v1_from_edge(&IJ[i])],1);
maxedg+=2;
}
}
//printf("MAXEDG(%" PRId64 ")\n",maxedg);
int32_t tot = 0;
for(unsigned int i = 0 ; i < nv+1 ; ++i)
tot += h_CSR_R[i];
printf("tot(%d)\n",tot);
// Malloc CRC array
//h_CSR_C = (int32_t*)malloc(sizeof(int32_t)*maxedg);
HANDLE_ERROR(hipMallocManaged((void**)&d_CSR_C,sizeof(int32_t)*maxedg));
h_CSR_C = d_CSR_C;
assert(h_CSR_C);
HANDLE_ERROR(hipMemAdvise(d_CSR_C, sizeof(int32_t)*maxedg, hipMemAdviseSetPreferredLocation, 0));
//omp_prefix_sum(h_CSR_R,nv);
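// Exclusive prefix sum over the per-vertex degree counts turns them into CSR row offsets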
int32_t tmp = h_CSR_R[0];
for(unsigned int i = 1 ; i < nv+1 ; ++i)
{
int32_t tmp2 = h_CSR_R[i];
h_CSR_R[i] = tmp;
tmp += tmp2;
}
h_CSR_R[0] = 0;
printf("last(%d)\n",h_CSR_R[nv]);
assert(h_CSR_R[nv] == maxedg);
//printf("\nCSR_R list");
//for(unsigned int i = 0 ; i < nv-1 ; ++i)
// printf(" %d(%d)",h_CSR_R[i],h_CSR_R[i+1] - h_CSR_R[i]);
//printf("\n");
int32_t * CSR_R_counter = (int32_t*)malloc(sizeof(int32_t)*nv);
assert(CSR_R_counter);
memset(CSR_R_counter,0,sizeof(int32_t)*nv);
//printf("CSR_C generiation\n");
/* Step two generate CSC array */
#pragma omp parallel for
for(unsigned int i = 0 ; i < nedge ; ++i)
{
int32_t v0 = (int32_t)get_v0_from_edge(&IJ[i]);
int32_t v1 = (int32_t)get_v1_from_edge(&IJ[i]);
if(v0 != v1)
{
int counter_v0 = __sync_fetch_and_add(&(CSR_R_counter[v0]),1);
int counter_v1 = __sync_fetch_and_add(&(CSR_R_counter[v1]),1);
//printf("Edge(%d,%d) added in %d(%d) and %d(%d)\n",v0,v1,v0,counter_v0,v1,counter_v1);
h_CSR_C[h_CSR_R[v0]+counter_v0] = v1;
h_CSR_C[h_CSR_R[v1]+counter_v1] = v0;
}
}
free(CSR_R_counter);
//printf("\nMalloc\n");
// Copy CSR and CSC on GPU
//HANDLE_ERROR(hipMemcpy(d_CSR_R,h_CSR_R,sizeof(int32_t)*(nv+1),hipMemcpyHostToDevice));
//HANDLE_ERROR(hipMemcpy(d_CSR_C,h_CSR_C,sizeof(int32_t)*maxedg,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemAdvise(d_CSR_R, sizeof(int32_t)*(nv+1), hipMemAdviseSetReadMostly, 0));
HANDLE_ERROR(hipMemAdvise(d_CSR_C, sizeof(int32_t)*maxedg, hipMemAdviseSetReadMostly, 0));
// Prepare the in and out queues as bitmaps
nwords = (nv + BITMAP_WORD - 1) / BITMAP_WORD; // ceiling division: one bit per vertex
HANDLE_ERROR(hipMemcpyToSymbol(d_nwords,&nwords,sizeof(int32_t)));
HANDLE_ERROR(hipMallocManaged((void**)&d_in_queue,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipMallocManaged((void**)&d_out_queue,sizeof(BITMAP_TYPE)*nwords));
//HANDLE_ERROR(hipMallocManaged((void**)&d_n_in_queue,sizeof(int32_t)));
//HANDLE_ERROR(hipMallocManaged((void**)&d_n_out_queue,sizeof(int32_t)));
HANDLE_ERROR(hipMallocManaged((void**)&d_visited_tex,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipMallocManaged((void**)&d_predecessors,sizeof(int32_t)*nv));
//h_predecessors = (int32_t*)malloc(sizeof(int32_t)*nv);
h_predecessors = d_predecessors;
assert(h_predecessors);
HANDLE_ERROR(hipMemAdvise(d_in_queue, sizeof(BITMAP_TYPE)*nwords, hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(d_out_queue, sizeof(BITMAP_TYPE)*nwords, hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(d_visited_tex, sizeof(BITMAP_TYPE)*nwords, hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(&d_n_in_queue, sizeof(int32_t), hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(&d_n_out_queue, sizeof(int32_t), hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(d_predecessors, sizeof(int32_t)*nv, hipMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(hipMemAdvise(d_predecessors, sizeof(int32_t)*nv, hipMemAdviseSetAccessedBy, hipCpuDeviceId));
hipEventCreate(&start);
hipEventCreate(&stop);
printf("End make_CSR\n");
}
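// Warp-level scan and reduction helpers built on register shuffles (no shared memory needed)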
__inline__ __device__ int warpScanSumDown(int val)
{
int lane_id = threadIdx.x & 31;
for(int offset = 1 ; offset < 32 ; offset <<= 1)
{
int y = __shfl_down(val,offset);
if(lane_id <= 31 - offset)
val += y;
}
return val;
}
__inline__ __device__ int warpScanSum(int val)
{
int lane_id = threadIdx.x & 31;
for(int offset = 1 ; offset < 32 ; offset <<= 1)
{
int y = __shfl_up(val,offset);
if(lane_id >= offset)
val += y;
}
return val;
}
__inline__ __device__ int warpReduceSum(int val)
{
for(int offset = warpSize/2 ; offset > 0 ; offset /= 2)
val += __shfl_down(val,offset);
return val;
}
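// Bottom-up frontier step: one thread per vertex; each warp cooperatively scans the adjacency list of every unvisited lane, looking for a parent whose bit is set in the input-queue bitmap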
__global__ void explore_frontier_CSR( BITMAP_TYPE * out_queue, int32_t * visited_label, BITMAP_TYPE * visited_tex, int32_t * n_out_queue, int32_t * R, int32_t * C)
{
int lane_id = threadIdx.x & 31;
int warp_id = threadIdx.x >> 5;
int ligne = threadIdx.x+blockIdx.x*blockDim.x;
int32_t value_visited = visited_label[ligne]; //GLOBAL
int actif = 0;
if(value_visited == -1)
actif = 1;
if(!__any(actif))
return;
unsigned int word = ligne/BITMAP_WORD;
unsigned int range[3] = {0,0,0};
if(value_visited == -1)
{
range[0] = R[ligne];
range[1] = R[ligne+1];
range[2] = range[1] - range[0];
}
// Explore each row in turn; the whole warp cooperates on one row at a time
volatile __shared__ int comm[NWARPS][3];
volatile __shared__ int shared_ligne[NWARPS];
volatile __shared__ int sum[NWARPS];
volatile __shared__ int fin[NWARPS];
if(lane_id == 0)
sum[warp_id] = 0;
while( __any(range[2]) )
{
int voisin = -1;
if(range[2])
comm[warp_id][0] = lane_id;
if(comm[warp_id][0] == lane_id)
{
comm[warp_id][1] = range[0];
comm[warp_id][2] = range[1];
range[2] = 0;
shared_ligne[warp_id] = ligne;
}
int r_gather = comm[warp_id][1] + lane_id;
int r_gather_end = comm[warp_id][2];
if(lane_id==0)
fin[warp_id] = 0;
while(r_gather < r_gather_end && !fin[warp_id])
{
voisin = C[r_gather];
// Check whether this neighbor is in the input queue
unsigned int position = voisin / BITMAP_WORD;
BITMAP_TYPE mask = tex1Dfetch(tex_in_queue,position);
BITMAP_TYPE mask_bit = 1 << (voisin % BITMAP_WORD);
if(mask & mask_bit)
{
// Record the neighbor directly in visited and move on
//visited_label[shared_ligne[warp_id]] = voisin+d_offset;
//int old = atomicCAS(&visited_label[shared_ligne[warp_id]],-1,voisin+d_offset);
//if(old == -1)
visited_label[shared_ligne[warp_id]] = voisin;
if(visited_label[shared_ligne[warp_id]] == voisin)
{
visited_tex[word] |= 1 << shared_ligne[warp_id]%BITMAP_WORD;
out_queue[word] |= 1 << shared_ligne[warp_id]%BITMAP_WORD;
++sum[warp_id];
fin[warp_id] = 1;
}
}
r_gather+=32;
}
}
if(lane_id == 0 && sum[warp_id])
atomicAdd(n_out_queue,sum[warp_id]);
}
//__launch_bounds__(NWARPS*32, MIN_BLOCKS_PER_SMX)
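// Top-down frontier step: each warp owns one 32-bit word of the input-queue bitmap (one lane per queued vertex) and cooperatively gathers its neighbors, claiming unvisited ones into the output queue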
__global__ void explore_frontier_CSC( restrict BITMAP_TYPE * in_queue, restrict BITMAP_TYPE * out_queue, int32_t * visited_label, BITMAP_TYPE * visited_tex , int32_t * n_out_queue, int32_t * R, int32_t * C)
{
int lane_id = threadIdx.x & 31;
int warp_id = threadIdx.x >> 5;
int word = blockIdx.x*NWARPS+warp_id;
int val_in_queue = in_queue[word]; // GLOBAL
if(val_in_queue == 0)
return;
int id_sommet = -1;
unsigned int range[3] = {0,0,0};
if(val_in_queue & 1 << lane_id)
{
id_sommet = word*32+lane_id;
range[0] = C[id_sommet]; //GLOBAL
range[1] = C[id_sommet+1]; //GLOBAL
range[2] = range[1] - range[0];
}
volatile __shared__ int comm[NWARPS][3];
volatile __shared__ int shared_sommet[NWARPS];
uint32_t sum;
while( __any(range[2]) )
{
int voisin = -1;
if(range[2])
comm[warp_id][0] = lane_id; // SHARED
if(comm[warp_id][0] == lane_id)
{
comm[warp_id][1] = range[0]; // SHARED
comm[warp_id][2] = range[1]; // SHARED
range[2] = 0;
shared_sommet[warp_id] = id_sommet; // SHARED
}
int r_gather = comm[warp_id][1] + lane_id;
int r_gather_end = comm[warp_id][2];
while(r_gather < r_gather_end)
{
sum = 0;
voisin = R[r_gather]; // GLOBAL
unsigned int position = voisin / BITMAP_WORD;
BITMAP_TYPE mask = tex1Dfetch(tex_visited,position);
BITMAP_TYPE mask_bit = 1 << (voisin % BITMAP_WORD);
if(!(mask & mask_bit))
{
visited_tex[position] |= mask_bit;
//int32_t value = atomicCAS(&visited_label[voisin],-1,shared_sommet[warp_id]+d_offset);
if(visited_label[voisin] == -1)
visited_label[voisin] = shared_sommet[warp_id];
if(visited_label[voisin] == shared_sommet[warp_id])
{
unsigned int val_out_queue = 1 << voisin%32;
atomicOr(&out_queue[voisin/32],val_out_queue);
sum = 1;
}
}
// TODO: do this at the end
if(__any(sum))
{
sum = warpReduceSum(sum);
if(lane_id == 0)
atomicAdd(n_out_queue,sum);
}
r_gather+=32;
}
}
}
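// Single-thread kernel that seeds the BFS: the source vertex becomes its own parent and its bit is set in the input-queue and visited bitmaps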
__global__ void setup_GPU(int32_t * predecessors, int64_t srcvtx, BITMAP_TYPE * in_queue, BITMAP_TYPE * visited_tex)
{
predecessors[srcvtx] = (int32_t)srcvtx;
in_queue[srcvtx/BITMAP_WORD] = 1 << srcvtx%BITMAP_WORD;
visited_tex[srcvtx/BITMAP_WORD] = 1 << srcvtx%BITMAP_WORD;
}
/* Create the graph structure on the GPUs */
extern "C"
int create_graph_from_edgelist(struct packed_edge * IJ, int64_t nedge)
{
//printf("create_graph_from_edgelist nedge(%" PRId64 ")\n",nedge);
#pragma omp parallel
#pragma omp single
printf("%d threads\n", omp_get_num_threads());
/* Each thread handle a GPU */
find_nv(IJ,nedge);
/* Compute CSR representation */
edgelist_to_CSR(IJ,nedge);
return 0;
}
extern "C"
int make_bfs_tree( int64_t *bfs_tree_out, int64_t *max_vtx_out, int64_t srcvtx)
{
printf("\n");
// TODO check this nv != maxvtx
*max_vtx_out = maxvtx;
h_n_in_queue = 1;
d_n_in_queue = h_n_in_queue;
// HANDLE_ERROR(hipMemcpy(d_n_in_queue,&h_n_in_queue,sizeof(int32_t),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemset(d_in_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipMemset(d_visited_tex,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipMemset(d_predecessors,-1,sizeof(int32_t)*nv));
hipLaunchKernelGGL(( setup_GPU), dim3(1),dim3(1), 0, 0, d_predecessors,srcvtx,d_in_queue,d_visited_tex);
//setup_GPU<<<(nv + (NWARPS/2))/NWARPS,NWARPS>>>(d_predecessors,srcvtx);
int32_t iteration = 0;
while(1)
{
if(iteration++ > 1 << 20)
{
fprintf(stderr,"Too many iterations(%d)\n",iteration);
return -1;
}
dim3 dimGrid(nwords/NWARPS,0,0);
dim3 dimBlock(32*NWARPS ,0,0);
printf("iteration(%2d) n_in_queue(%10d) nblocks(%4d) nthreads(%d) ",
iteration,
h_n_in_queue,
dimGrid.x,
dimBlock.x);
fflush(stdout);
HANDLE_ERROR(hipMemset(&d_n_out_queue,0,sizeof(int32_t)));
hipEventRecord(start);
if(iteration < 4)
{
printf(" explore_frontier_CSC ");
HANDLE_ERROR(hipMemset(d_out_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipBindTexture(0, tex_visited, d_visited_tex,sizeof(BITMAP_TYPE)*nwords));
hipLaunchKernelGGL(( explore_frontier_CSC), dim3(dimGrid.x) , dim3(dimBlock.x) , 0, 0, d_in_queue, d_out_queue, d_predecessors,d_visited_tex, &d_n_out_queue, d_CSR_C, d_CSR_R);
HANDLE_ERROR(hipUnbindTexture(tex_visited));
}else{
printf(" explore_frontier_CSR ");
HANDLE_ERROR(hipMemset(&d_n_out_queue,0,sizeof(uint32_t)));
HANDLE_ERROR(hipMemset(d_out_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(hipBindTexture(0, tex_in_queue, d_in_queue,sizeof(BITMAP_TYPE)*nwords));
hipLaunchKernelGGL(( explore_frontier_CSR), dim3(dimGrid.x) , dim3(dimBlock.x) , 0, 0, d_out_queue, d_predecessors,d_visited_tex, &d_n_out_queue, d_CSR_R, d_CSR_C);
HANDLE_ERROR(hipUnbindTexture(tex_in_queue));
}
hipEventRecord(stop);
//HANDLE_ERROR(hipMemcpy(&h_n_out_queue,d_n_out_queue,sizeof(int32_t),hipMemcpyDeviceToHost));
hipEventSynchronize(stop);
h_n_out_queue = d_n_out_queue;
float milliseconds = 0;
hipEventElapsedTime(&milliseconds,start,stop);
printf("out_queue(%10d) time(%.4f)s \n",h_n_out_queue,milliseconds/1000);
if(h_n_out_queue == 0)
{
printf("BFS ended\n");
break;
}
/* Switch queues */
HANDLE_ERROR(hipMemcpy(d_in_queue,d_out_queue,sizeof(BITMAP_TYPE)*nwords,hipMemcpyDeviceToDevice));
HANDLE_ERROR(hipMemcpy(&d_n_in_queue,&d_n_out_queue,sizeof(int32_t),hipMemcpyDeviceToDevice));
h_n_in_queue = h_n_out_queue;
}
// HANDLE_ERROR(hipMemcpy(h_predecessors,d_predecessors,sizeof(int32_t)*nv,hipMemcpyDeviceToHost));
#pragma omp parallel for
for(unsigned int i = 0 ; i < nv ; ++i)
{
bfs_tree_out[i] = (int64_t)h_predecessors[i];
assert(bfs_tree_out[i] < nv);
assert(bfs_tree_out[i] > -2);
}
return 0;
}
extern "C"
void destroy_graph()
{
//free(h_CSR_R);
//free(h_CSR_C);
HANDLE_ERROR(hipFree(d_CSR_R));
HANDLE_ERROR(hipFree(d_CSR_C));
HANDLE_ERROR(hipFree(d_in_queue));
HANDLE_ERROR(hipFree(d_out_queue));
HANDLE_ERROR(hipFree(d_predecessors));
HANDLE_ERROR(hipFree(d_visited_tex));
}
| 0384b345b8048027e1f025f76d503f16a2b2b8cf.cu | #include "../compat.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
extern "C"{
#include "../graph500.h"
}
#include "../xalloc.h"
#include "../generator/graph_generator.h"
#ifndef NWARPS
// Set by Makefile
#define NWARPS 8
#endif
//#define CHUNK_ELEM 4096
//#define CHUNK_SIZE (sizeof(int32_t)*CHUNK_ELEM) // Size of a chunk in byte
#define BITMAP_TYPE uint32_t
#define BITMAP_WORD 32
/* Global variables */
static int64_t maxvtx; // total number of vertices
static int64_t nv; // number of vertices
static int64_t maxedg;
static int32_t nwords;
/* Host pointers */
static int32_t * h_CSR_R;
static int32_t * h_CSR_C;
static int32_t * h_predecessors;
static int32_t h_n_in_queue;
static int32_t h_n_out_queue;
/* Device pointers */
static int32_t * d_CSR_R;
static int32_t * d_CSR_C;
static BITMAP_TYPE * d_in_queue;
static BITMAP_TYPE * d_out_queue;
static int32_t * d_predecessors;
__managed__ static int32_t d_n_in_queue;
__managed__ static int32_t d_n_out_queue;
static BITMAP_TYPE * d_visited_tex;
__constant__ int32_t d_nwords;
texture<BITMAP_TYPE, 1, cudaReadModeElementType> tex_visited;
texture<BITMAP_TYPE, 1, cudaReadModeElementType> tex_in_queue;
static cudaEvent_t start, stop;
static void HandleError(cudaError_t err,
const char *file,
int line)
{
if(err != cudaSuccess)
{
printf("%s in %s at line %d \n",cudaGetErrorString(err),file,line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError(err, __FILE__, __LINE__))
/* "Private functions" */
/* Compute the total number of vertices in the generated graph */
static void find_nv(const struct packed_edge * restrict IJ, const int64_t nedge)
{
maxvtx = -1;
// Here use the 40 cores to compute
#pragma omp parallel
{
int64_t k;
#pragma omp for reduction(max:maxvtx)
for(k = 0 ; k < nedge ; ++k)
{
if(get_v0_from_edge(&IJ[k]) > maxvtx)
maxvtx = get_v0_from_edge(&IJ[k]);
if(get_v1_from_edge(&IJ[k]) > maxvtx)
maxvtx = get_v1_from_edge(&IJ[k]);
}
}
nv = maxvtx+1;
}
void
omp_prefix_sum(int32_t * x, int N)
{
int32_t * suma;
#pragma omp parallel
{
const int ithread = omp_get_thread_num();
const int nthreads = omp_get_num_threads();
#pragma omp single
{
suma = (int32_t*)malloc(sizeof(int32_t)*(nthreads+1));
suma[0] = 0;
}
int32_t sum = 0;
#pragma omp for schedule(static)
for(unsigned int i = 0 ; i < N ; ++i)
{
sum += x[i];
x[i] = sum;
}
suma[ithread+1] = sum;
#pragma omp barrier
int32_t offset = 0;
for(unsigned int i = 0 ; i < (ithread+1) ; ++i)
offset += suma[i];
#pragma omp for schedule(static)
for(unsigned int i = 0 ; i < N ; ++i)
x[i] += offset;
}
for(unsigned int i = N ; i > 0 ; --i)
x[i] = x[i-1];
x[0] = 0;
free(suma);
}
static void
edgelist_to_CSR(const struct packed_edge * restrict IJ, const int64_t nedge)
{
//int32_t *h_chunk_v0, *h_chunk_v1;
//int32_t *d_chunk_v0, *d_chunk_v1;
//int nchunk = (2*nedge*sizeof(int32_t))/CHUNK_SIZE;
//printf("MAXVTX(%" PRId64 ")\n",maxvtx);
//printf("NV(%" PRId64 ")\n",nv);
cudaSetDevice(0);
/* Init CSR arrays on GPU */
HANDLE_ERROR(cudaMallocManaged((void**)&d_CSR_R,sizeof(int32_t)*(nv+1)));
h_CSR_R = d_CSR_R;
//h_CSR_R = (int32_t*)malloc(sizeof(int32_t)*(nv+1));
assert(h_CSR_R);
HANDLE_ERROR(cudaMemAdvise(d_CSR_R, sizeof(int32_t)*(nv+1), cudaMemAdviseSetPreferredLocation, 0));
memset(h_CSR_R,0,sizeof(int32_t)*(nv+1));
/* Step one, count the CSR_R and CSR_C size */
maxedg = 0;
#pragma omp parallel for reduction(+:maxedg)
for(unsigned int i = 0 ; i < nedge ; ++i)
{
// No self loop
if(get_v0_from_edge(&IJ[i]) != get_v1_from_edge(&IJ[i]))
{
__sync_fetch_and_add(&h_CSR_R[get_v0_from_edge(&IJ[i])],1);
__sync_fetch_and_add(&h_CSR_R[get_v1_from_edge(&IJ[i])],1);
maxedg+=2;
}
}
//printf("MAXEDG(%" PRId64 ")\n",maxedg);
int32_t tot = 0;
for(unsigned int i = 0 ; i < nv+1 ; ++i)
tot += h_CSR_R[i];
printf("tot(%d)\n",tot);
// Malloc CRC array
//h_CSR_C = (int32_t*)malloc(sizeof(int32_t)*maxedg);
HANDLE_ERROR(cudaMallocManaged((void**)&d_CSR_C,sizeof(int32_t)*maxedg));
h_CSR_C = d_CSR_C;
assert(h_CSR_C);
HANDLE_ERROR(cudaMemAdvise(d_CSR_C, sizeof(int32_t)*maxedg, cudaMemAdviseSetPreferredLocation, 0));
//omp_prefix_sum(h_CSR_R,nv);
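// Exclusive prefix sum over the per-vertex degree counts turns them into CSR row offsets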
int32_t tmp = h_CSR_R[0];
for(unsigned int i = 1 ; i < nv+1 ; ++i)
{
int32_t tmp2 = h_CSR_R[i];
h_CSR_R[i] = tmp;
tmp += tmp2;
}
h_CSR_R[0] = 0;
printf("last(%d)\n",h_CSR_R[nv]);
assert(h_CSR_R[nv] == maxedg);
//printf("\nCSR_R list");
//for(unsigned int i = 0 ; i < nv-1 ; ++i)
// printf(" %d(%d)",h_CSR_R[i],h_CSR_R[i+1] - h_CSR_R[i]);
//printf("\n");
int32_t * CSR_R_counter = (int32_t*)malloc(sizeof(int32_t)*nv);
assert(CSR_R_counter);
memset(CSR_R_counter,0,sizeof(int32_t)*nv);
//printf("CSR_C generiation\n");
/* Step two generate CSC array */
#pragma omp parallel for
for(unsigned int i = 0 ; i < nedge ; ++i)
{
int32_t v0 = (int32_t)get_v0_from_edge(&IJ[i]);
int32_t v1 = (int32_t)get_v1_from_edge(&IJ[i]);
if(v0 != v1)
{
int counter_v0 = __sync_fetch_and_add(&(CSR_R_counter[v0]),1);
int counter_v1 = __sync_fetch_and_add(&(CSR_R_counter[v1]),1);
//printf("Edge(%d,%d) added in %d(%d) and %d(%d)\n",v0,v1,v0,counter_v0,v1,counter_v1);
h_CSR_C[h_CSR_R[v0]+counter_v0] = v1;
h_CSR_C[h_CSR_R[v1]+counter_v1] = v0;
}
}
free(CSR_R_counter);
//printf("\nMalloc\n");
// Copy CSR and CSC on GPU
//HANDLE_ERROR(cudaMemcpy(d_CSR_R,h_CSR_R,sizeof(int32_t)*(nv+1),cudaMemcpyHostToDevice));
//HANDLE_ERROR(cudaMemcpy(d_CSR_C,h_CSR_C,sizeof(int32_t)*maxedg,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemAdvise(d_CSR_R, sizeof(int32_t)*(nv+1), cudaMemAdviseSetReadMostly, 0));
HANDLE_ERROR(cudaMemAdvise(d_CSR_C, sizeof(int32_t)*maxedg, cudaMemAdviseSetReadMostly, 0));
// Prepare the in and out queues as bitmaps
nwords = (nv + BITMAP_WORD - 1) / BITMAP_WORD; // ceiling division: one bit per vertex
HANDLE_ERROR(cudaMemcpyToSymbol(d_nwords,&nwords,sizeof(int32_t)));
HANDLE_ERROR(cudaMallocManaged((void**)&d_in_queue,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaMallocManaged((void**)&d_out_queue,sizeof(BITMAP_TYPE)*nwords));
//HANDLE_ERROR(cudaMallocManaged((void**)&d_n_in_queue,sizeof(int32_t)));
//HANDLE_ERROR(cudaMallocManaged((void**)&d_n_out_queue,sizeof(int32_t)));
HANDLE_ERROR(cudaMallocManaged((void**)&d_visited_tex,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaMallocManaged((void**)&d_predecessors,sizeof(int32_t)*nv));
//h_predecessors = (int32_t*)malloc(sizeof(int32_t)*nv);
h_predecessors = d_predecessors;
assert(h_predecessors);
HANDLE_ERROR(cudaMemAdvise(d_in_queue, sizeof(BITMAP_TYPE)*nwords, cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(d_out_queue, sizeof(BITMAP_TYPE)*nwords, cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(d_visited_tex, sizeof(BITMAP_TYPE)*nwords, cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(&d_n_in_queue, sizeof(int32_t), cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(&d_n_out_queue, sizeof(int32_t), cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(d_predecessors, sizeof(int32_t)*nv, cudaMemAdviseSetPreferredLocation, 0));
HANDLE_ERROR(cudaMemAdvise(d_predecessors, sizeof(int32_t)*nv, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId));
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("End make_CSR\n");
}
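// Warp-level scan and reduction helpers built on register shuffles (no shared memory needed)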
__inline__ __device__ int warpScanSumDown(int val)
{
int lane_id = threadIdx.x & 31;
for(int offset = 1 ; offset < 32 ; offset <<= 1)
{
int y = __shfl_down(val,offset);
if(lane_id <= 31 - offset)
val += y;
}
return val;
}
__inline__ __device__ int warpScanSum(int val)
{
int lane_id = threadIdx.x & 31;
for(int offset = 1 ; offset < 32 ; offset <<= 1)
{
int y = __shfl_up(val,offset);
if(lane_id >= offset)
val += y;
}
return val;
}
__inline__ __device__ int warpReduceSum(int val)
{
for(int offset = warpSize/2 ; offset > 0 ; offset /= 2)
val += __shfl_down(val,offset);
return val;
}
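// Bottom-up frontier step: one thread per vertex; each warp cooperatively scans the adjacency list of every unvisited lane, looking for a parent whose bit is set in the input-queue bitmap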
__global__ void explore_frontier_CSR( BITMAP_TYPE * out_queue, int32_t * visited_label, BITMAP_TYPE * visited_tex, int32_t * n_out_queue, int32_t * R, int32_t * C)
{
int lane_id = threadIdx.x & 31;
int warp_id = threadIdx.x >> 5;
int ligne = threadIdx.x+blockIdx.x*blockDim.x;
int32_t value_visited = visited_label[ligne]; //GLOBAL
int actif = 0;
if(value_visited == -1)
actif = 1;
if(!__any(actif))
return;
unsigned int word = ligne/BITMAP_WORD;
unsigned int range[3] = {0,0,0};
if(value_visited == -1)
{
range[0] = R[ligne];
range[1] = R[ligne+1];
range[2] = range[1] - range[0];
}
// Explore each row in turn; the whole warp cooperates on one row at a time
volatile __shared__ int comm[NWARPS][3];
volatile __shared__ int shared_ligne[NWARPS];
volatile __shared__ int sum[NWARPS];
volatile __shared__ int fin[NWARPS];
if(lane_id == 0)
sum[warp_id] = 0;
while( __any(range[2]) )
{
int voisin = -1;
if(range[2])
comm[warp_id][0] = lane_id;
if(comm[warp_id][0] == lane_id)
{
comm[warp_id][1] = range[0];
comm[warp_id][2] = range[1];
range[2] = 0;
shared_ligne[warp_id] = ligne;
}
int r_gather = comm[warp_id][1] + lane_id;
int r_gather_end = comm[warp_id][2];
if(lane_id==0)
fin[warp_id] = 0;
while(r_gather < r_gather_end && !fin[warp_id])
{
voisin = C[r_gather];
// Check whether this neighbor is in the input queue
unsigned int position = voisin / BITMAP_WORD;
BITMAP_TYPE mask = tex1Dfetch(tex_in_queue,position);
BITMAP_TYPE mask_bit = 1 << (voisin % BITMAP_WORD);
if(mask & mask_bit)
{
// Record the neighbor directly in visited and move on
//visited_label[shared_ligne[warp_id]] = voisin+d_offset;
//int old = atomicCAS(&visited_label[shared_ligne[warp_id]],-1,voisin+d_offset);
//if(old == -1)
visited_label[shared_ligne[warp_id]] = voisin;
if(visited_label[shared_ligne[warp_id]] == voisin)
{
visited_tex[word] |= 1 << shared_ligne[warp_id]%BITMAP_WORD;
out_queue[word] |= 1 << shared_ligne[warp_id]%BITMAP_WORD;
++sum[warp_id];
fin[warp_id] = 1;
}
}
r_gather+=32;
}
}
if(lane_id == 0 && sum[warp_id])
atomicAdd(n_out_queue,sum[warp_id]);
}
//__launch_bounds__(NWARPS*32, MIN_BLOCKS_PER_SMX)
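// Top-down frontier step: each warp owns one 32-bit word of the input-queue bitmap (one lane per queued vertex) and cooperatively gathers its neighbors, claiming unvisited ones into the output queue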
__global__ void explore_frontier_CSC( restrict BITMAP_TYPE * in_queue, restrict BITMAP_TYPE * out_queue, int32_t * visited_label, BITMAP_TYPE * visited_tex , int32_t * n_out_queue, int32_t * R, int32_t * C)
{
int lane_id = threadIdx.x & 31;
int warp_id = threadIdx.x >> 5;
int word = blockIdx.x*NWARPS+warp_id;
int val_in_queue = in_queue[word]; // GLOBAL
if(val_in_queue == 0)
return;
int id_sommet = -1;
unsigned int range[3] = {0,0,0};
if(val_in_queue & 1 << lane_id)
{
id_sommet = word*32+lane_id;
range[0] = C[id_sommet]; //GLOBAL
range[1] = C[id_sommet+1]; //GLOBAL
range[2] = range[1] - range[0];
}
volatile __shared__ int comm[NWARPS][3];
volatile __shared__ int shared_sommet[NWARPS];
uint32_t sum;
while( __any(range[2]) )
{
int voisin = -1;
if(range[2])
comm[warp_id][0] = lane_id; // SHARED
if(comm[warp_id][0] == lane_id)
{
comm[warp_id][1] = range[0]; // SHARED
comm[warp_id][2] = range[1]; // SHARED
range[2] = 0;
shared_sommet[warp_id] = id_sommet; // SHARED
}
int r_gather = comm[warp_id][1] + lane_id;
int r_gather_end = comm[warp_id][2];
while(r_gather < r_gather_end)
{
sum = 0;
voisin = R[r_gather]; // GLOBAL
unsigned int position = voisin / BITMAP_WORD;
BITMAP_TYPE mask = tex1Dfetch(tex_visited,position);
BITMAP_TYPE mask_bit = 1 << (voisin % BITMAP_WORD);
if(!(mask & mask_bit))
{
visited_tex[position] |= mask_bit;
//int32_t value = atomicCAS(&visited_label[voisin],-1,shared_sommet[warp_id]+d_offset);
if(visited_label[voisin] == -1)
visited_label[voisin] = shared_sommet[warp_id];
if(visited_label[voisin] == shared_sommet[warp_id])
{
unsigned int val_out_queue = 1 << voisin%32;
atomicOr(&out_queue[voisin/32],val_out_queue);
sum = 1;
}
}
// TODO: do this at the end
if(__any(sum))
{
sum = warpReduceSum(sum);
if(lane_id == 0)
atomicAdd(n_out_queue,sum);
}
r_gather+=32;
}
}
}
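// Single-thread kernel that seeds the BFS: the source vertex becomes its own parent and its bit is set in the input-queue and visited bitmaps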
__global__ void setup_GPU(int32_t * predecessors, int64_t srcvtx, BITMAP_TYPE * in_queue, BITMAP_TYPE * visited_tex)
{
predecessors[srcvtx] = (int32_t)srcvtx;
in_queue[srcvtx/BITMAP_WORD] = 1 << srcvtx%BITMAP_WORD;
visited_tex[srcvtx/BITMAP_WORD] = 1 << srcvtx%BITMAP_WORD;
}
/* Create the graph structure on the GPUs */
extern "C"
int create_graph_from_edgelist(struct packed_edge * IJ, int64_t nedge)
{
//printf("create_graph_from_edgelist nedge(%" PRId64 ")\n",nedge);
#pragma omp parallel
#pragma omp single
printf("%d threads\n", omp_get_num_threads());
/* Each thread handle a GPU */
find_nv(IJ,nedge);
/* Compute CSR representation */
edgelist_to_CSR(IJ,nedge);
return 0;
}
extern "C"
int make_bfs_tree( int64_t *bfs_tree_out, int64_t *max_vtx_out, int64_t srcvtx)
{
printf("\n");
// TODO check this nv != maxvtx
*max_vtx_out = maxvtx;
h_n_in_queue = 1;
d_n_in_queue = h_n_in_queue;
// HANDLE_ERROR(cudaMemcpy(d_n_in_queue,&h_n_in_queue,sizeof(int32_t),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemset(d_in_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaMemset(d_visited_tex,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaMemset(d_predecessors,-1,sizeof(int32_t)*nv));
setup_GPU<<<1,1>>>(d_predecessors,srcvtx,d_in_queue,d_visited_tex);
//setup_GPU<<<(nv + (NWARPS/2))/NWARPS,NWARPS>>>(d_predecessors,srcvtx);
int32_t iteration = 0;
while(1)
{
if(iteration++ > 1 << 20)
{
fprintf(stderr,"Too many iterations(%d)\n",iteration);
return -1;
}
dim3 dimGrid(nwords/NWARPS,0,0);
dim3 dimBlock(32*NWARPS ,0,0);
printf("iteration(%2d) n_in_queue(%10d) nblocks(%4d) nthreads(%d) ",
iteration,
h_n_in_queue,
dimGrid.x,
dimBlock.x);
fflush(stdout);
HANDLE_ERROR(cudaMemset(&d_n_out_queue,0,sizeof(int32_t)));
cudaEventRecord(start);
if(iteration < 4)
{
printf(" explore_frontier_CSC ");
HANDLE_ERROR(cudaMemset(d_out_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaBindTexture(0, tex_visited, d_visited_tex,sizeof(BITMAP_TYPE)*nwords));
explore_frontier_CSC<<< dimGrid.x , dimBlock.x >>>( d_in_queue, d_out_queue, d_predecessors,d_visited_tex, &d_n_out_queue, d_CSR_C, d_CSR_R);
HANDLE_ERROR(cudaUnbindTexture(tex_visited));
}else{
printf(" explore_frontier_CSR ");
HANDLE_ERROR(cudaMemset(&d_n_out_queue,0,sizeof(uint32_t)));
HANDLE_ERROR(cudaMemset(d_out_queue,0,sizeof(BITMAP_TYPE)*nwords));
HANDLE_ERROR(cudaBindTexture(0, tex_in_queue, d_in_queue,sizeof(BITMAP_TYPE)*nwords));
explore_frontier_CSR<<< dimGrid.x , dimBlock.x >>>(d_out_queue, d_predecessors,d_visited_tex, &d_n_out_queue, d_CSR_R, d_CSR_C);
HANDLE_ERROR(cudaUnbindTexture(tex_in_queue));
}
cudaEventRecord(stop);
//HANDLE_ERROR(cudaMemcpy(&h_n_out_queue,d_n_out_queue,sizeof(int32_t),cudaMemcpyDeviceToHost));
cudaEventSynchronize(stop);
h_n_out_queue = d_n_out_queue;
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("out_queue(%10d) time(%.4f)s \n",h_n_out_queue,milliseconds/1000);
if(h_n_out_queue == 0)
{
printf("BFS ended\n");
break;
}
/* Switch queues */
HANDLE_ERROR(cudaMemcpy(d_in_queue,d_out_queue,sizeof(BITMAP_TYPE)*nwords,cudaMemcpyDeviceToDevice));
HANDLE_ERROR(cudaMemcpy(&d_n_in_queue,&d_n_out_queue,sizeof(int32_t),cudaMemcpyDeviceToDevice));
h_n_in_queue = h_n_out_queue;
}
// HANDLE_ERROR(cudaMemcpy(h_predecessors,d_predecessors,sizeof(int32_t)*nv,cudaMemcpyDeviceToHost));
#pragma omp parallel for
for(unsigned int i = 0 ; i < nv ; ++i)
{
bfs_tree_out[i] = (int64_t)h_predecessors[i];
assert(bfs_tree_out[i] < nv);
assert(bfs_tree_out[i] > -2);
}
return 0;
}
extern "C"
void destroy_graph()
{
//free(h_CSR_R);
//free(h_CSR_C);
HANDLE_ERROR(cudaFree(d_CSR_R));
HANDLE_ERROR(cudaFree(d_CSR_C));
HANDLE_ERROR(cudaFree(d_in_queue));
HANDLE_ERROR(cudaFree(d_out_queue));
HANDLE_ERROR(cudaFree(d_predecessors));
HANDLE_ERROR(cudaFree(d_visited_tex));
}
|
e34e718214f3d8679e201757e1427145cb4ff0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
using utilTimer::PerformanceTimer;
#define ERRORCHECK 1
#define CACHE_BOUNCE 1
#define MATERIAL_SORT 1
#define DEPTH_OF_FIELD 0
#define ANTIALIASING 0
#define GPU_TIMER 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static PathSegment * dev_cache_paths = NULL;
static ShadeableIntersection * dev_cache_intersections = NULL;
static Triangle* dev_triangles = NULL;
static int* dev_idxOfEachMesh = NULL;
static int* dev_endIdxOfEachMesh = NULL;
static int mesh_size = 0;
static int triangle_size = 0;
static std::vector<int> indexOffset;
hipEvent_t start, stop;
float totalTime = 0.f;
bool timerStart = true;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
hipMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
mesh_size = scene->meshes.size();
//cout << mesh_size << endl;
triangle_size = scene->totalTriangles;
hipMalloc(&dev_triangles, triangle_size * sizeof(Triangle));
for (int i = 0; i < mesh_size; i++) {
int triangle_size_per = scene->meshes[i].size();
int offset = scene->idxOfEachMesh[i];
hipMemcpy(dev_triangles + offset, scene->meshes[i].data(), triangle_size_per * sizeof(Triangle), hipMemcpyHostToDevice);
//cout << triangle_size_per << endl;
}
hipMalloc(&dev_idxOfEachMesh, mesh_size * sizeof(int));
hipMemcpy(dev_idxOfEachMesh, scene->idxOfEachMesh.data(), mesh_size * sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&dev_endIdxOfEachMesh, mesh_size * sizeof(int));
hipMemcpy(dev_endIdxOfEachMesh, scene->endIdxOfEachMesh.data(), mesh_size * sizeof(int), hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_cache_paths);
hipFree(dev_cache_intersections);
hipFree(dev_triangles);
hipFree(dev_idxOfEachMesh);
hipFree(dev_endIdxOfEachMesh);
checkCUDAError("pathtraceFree");
}
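// Maps a uniform sample in [0,1)^2 to a uniformly distributed point on the unit disk
// (r = sqrt(u) keeps the area density constant); used by the depth-of-field lens sampling.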
__host__ __device__
glm::vec3 squareToDiskUniform(const glm::vec2& sample)
{
float r = sqrt(sample.x);
float theta = 2 * PI * sample.y;
float x = r * cos(theta);
float y = r * sin(theta);
return glm::vec3(x, y, 0.f);
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
// TODO: implement antialiasing by jittering the ray
#if ANTIALIASING == 1 && CACHE_BOUNCE == 0
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#else
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#endif //ANTIALIASING
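// Thin-lens depth of field: offset the ray origin to a point on the lens disc and
// re-aim the ray at the focal plane, so only geometry near focalDistance stays sharp.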
#if DEPTH_OF_FIELD == 1
float discRadius = 1.f;
float focalDistance = 7.f; // distance between the projection point and the plane where everything is in perfect focus
glm::vec2 sample = glm::vec2(u01(rng), u01(rng));
glm::vec3 pLens = discRadius * squareToDiskUniform(sample);
float ft = focalDistance / glm::abs(segment.ray.direction.z);
glm::vec3 pFocus = segment.ray.origin + ft * segment.ray.direction;
segment.ray.origin += glm::vec3(pLens.x, pLens.y, 0.f);
segment.ray.direction = glm::normalize(pFocus - segment.ray.origin);
#endif //DEPTH_OF_FIELD
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, Triangle *triangles
, int geoms_size
, ShadeableIntersection * intersections
, int* idxOfEachMesh
, int* endIdxOfEachMesh
, int iter
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, u01(rng));
}
else if (geom.type == MESH) {
int startIdx = idxOfEachMesh[geom.meshIdx];
int endIdx = endIdxOfEachMesh[geom.meshIdx];
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
t = meshIntersectionTest(geom, triangles, pathSegment.ray, tmp_intersect, tmp_normal, outside, startIdx, endIdx, u01(rng));
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
}
}
}
__global__ void shadeRealMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < num_paths) {
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
if(material.emittance > 0.f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = -1;
}
else
{
glm::vec3 intersectPoint = getPointOnRay(pathSegments[idx].ray, intersection.t);
glm::vec3 normal = intersection.surfaceNormal;
scatterRay(pathSegments[idx], intersectPoint, normal, material, rng);
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = -1;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
//timer().startGpuTimer();
#if GPU_TIMER == 1
hipEventRecord(start);
#endif // GPU_TIMER
#if CACHE_BOUNCE == 1
if(iter == 1)
{
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
hipMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
}
else
{
hipMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
}
#else
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
#endif //CACHE_BOUNCE
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
#if CACHE_BOUNCE == 1
if(depth == 0)
{
if(iter == 1)
{
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
//hipDeviceSynchronize();
depth++;
hipMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
}
else
{
hipMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
depth++;
hipDeviceSynchronize();
}
}
else
{
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
}
#else
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
#endif //CACHE_BOUNCE
#if MATERIAL_SORT == 1
// Sort the rays/path segments
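// Grouping segments by material id keeps threads of a warp on the same BSDF branch
// in the shading kernel, trading the cost of the sort for less divergence.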
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, material_sort());
#endif //MATERIAL_SORT
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shadeRealMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
// Stream compact away all of the terminated paths
//dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, isTerminated());
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isContinuing());
num_paths = dev_path_end - dev_paths;
if(num_paths <= 0 || depth >= traceDepth)
{
iterationComplete = true;
}
}
//timer().endGpuTimer();
#if GPU_TIMER == 1
hipEventRecord(stop);
hipEventSynchronize(stop);
float t;
hipEventElapsedTime(&t, start, stop);
totalTime += t;
if (timerStart && iter > 50) {
std::cout << " time per iteration is: " << totalTime / iter << " ms" <<std::endl;
timerStart = false;
}
#endif // GPU_TIMER
//printElapsedTime(timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| e34e718214f3d8679e201757e1427145cb4ff0d4.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
using utilTimer::PerformanceTimer;
#define ERRORCHECK 1
#define CACHE_BOUNCE 1
#define MATERIAL_SORT 1
#define DEPTH_OF_FIELD 0
#define ANTIALIASING 0
#define GPU_TIMER 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static PathSegment * dev_cache_paths = NULL;
static ShadeableIntersection * dev_cache_intersections = NULL;
static Triangle* dev_triangles = NULL;
static int* dev_idxOfEachMesh = NULL;
static int* dev_endIdxOfEachMesh = NULL;
static int mesh_size = 0;
static int triangle_size = 0;
static std::vector<int> indexOffset;
cudaEvent_t start, stop;
float totalTime = 0.f;
bool timerStart = true;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
mesh_size = scene->meshes.size();
//cout << mesh_size << endl;
triangle_size = scene->totalTriangles;
cudaMalloc(&dev_triangles, triangle_size * sizeof(Triangle));
for (int i = 0; i < mesh_size; i++) {
int triangle_size_per = scene->meshes[i].size();
int offset = scene->idxOfEachMesh[i];
cudaMemcpy(dev_triangles + offset, scene->meshes[i].data(), triangle_size_per * sizeof(Triangle), cudaMemcpyHostToDevice);
//cout << triangle_size_per << endl;
}
cudaMalloc(&dev_idxOfEachMesh, mesh_size * sizeof(int));
cudaMemcpy(dev_idxOfEachMesh, scene->idxOfEachMesh.data(), mesh_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&dev_endIdxOfEachMesh, mesh_size * sizeof(int));
cudaMemcpy(dev_endIdxOfEachMesh, scene->endIdxOfEachMesh.data(), mesh_size * sizeof(int), cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_cache_paths);
cudaFree(dev_cache_intersections);
cudaFree(dev_triangles);
cudaFree(dev_idxOfEachMesh);
cudaFree(dev_endIdxOfEachMesh);
checkCUDAError("pathtraceFree");
}
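// Maps a uniform sample in [0,1)^2 to a uniformly distributed point on the unit disk
// (r = sqrt(u) keeps the area density constant); used by the depth-of-field lens sampling.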
__host__ __device__
glm::vec3 squareToDiskUniform(const glm::vec2& sample)
{
float r = sqrt(sample.x);
float theta = 2 * PI * sample.y;
float x = r * cos(theta);
float y = r * sin(theta);
return glm::vec3(x, y, 0.f);
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
// TODO: implement antialiasing by jittering the ray
#if ANTIALIASING == 1 && CACHE_BOUNCE == 0
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#else
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#endif //ANTIALIASING
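// Thin-lens depth of field: offset the ray origin to a point on the lens disc and
// re-aim the ray at the focal plane, so only geometry near focalDistance stays sharp.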
#if DEPTH_OF_FIELD == 1
float discRadius = 1.f;
float focalDistance = 7.f; // distance between the projection point and the plane where everything is in perfect focus
glm::vec2 sample = glm::vec2(u01(rng), u01(rng));
glm::vec3 pLens = discRadius * squareToDiskUniform(sample);
float ft = focalDistance / glm::abs(segment.ray.direction.z);
glm::vec3 pFocus = segment.ray.origin + ft * segment.ray.direction;
segment.ray.origin += glm::vec3(pLens.x, pLens.y, 0.f);
segment.ray.direction = glm::normalize(pFocus - segment.ray.origin);
#endif //DEPTH_OF_FIELD
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, Triangle *triangles
, int geoms_size
, ShadeableIntersection * intersections
, int* idxOfEachMesh
, int* endIdxOfEachMesh
, int iter
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, u01(rng));
}
else if (geom.type == MESH) {
int startIdx = idxOfEachMesh[geom.meshIdx];
int endIdx = endIdxOfEachMesh[geom.meshIdx];
thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
t = meshIntersectionTest(geom, triangles, pathSegment.ray, tmp_intersect, tmp_normal, outside, startIdx, endIdx, u01(rng));
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
}
}
}
__global__ void shadeRealMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < num_paths) {
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
if(material.emittance > 0.f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = -1;
}
else
{
glm::vec3 intersectPoint = getPointOnRay(pathSegments[idx].ray, intersection.t);
glm::vec3 normal = intersection.surfaceNormal;
scatterRay(pathSegments[idx], intersectPoint, normal, material, rng);
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = -1;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
//timer().startGpuTimer();
#if GPU_TIMER == 1
cudaEventRecord(start);
#endif // GPU_TIMER
#if CACHE_BOUNCE == 1
if(iter == 1)
{
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
cudaMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
}
else
{
cudaMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
}
#else
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
#endif //CACHE_BOUNCE
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
#if CACHE_BOUNCE == 1
if(depth == 0)
{
if(iter == 1)
{
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
//cudaDeviceSynchronize();
depth++;
cudaMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
}
else
{
cudaMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
depth++;
cudaDeviceSynchronize();
}
}
else
{
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
}
#else
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth, num_paths, dev_paths, dev_geoms, dev_triangles
, hst_scene->geoms.size(), dev_intersections
, dev_idxOfEachMesh, dev_endIdxOfEachMesh, iter);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
#endif //CACHE_BOUNCE
#if MATERIAL_SORT == 1
// Sort the rays/path segments
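// Grouping segments by material id keeps threads of a warp on the same BSDF branch
// in the shading kernel, trading the cost of the sort for less divergence.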
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, material_sort());
#endif //MATERIAL_SORT
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeRealMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
// Stream compact away all of the terminated paths
//dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, isTerminated());
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isContinuing());
num_paths = dev_path_end - dev_paths;
if(num_paths <= 0 || depth >= traceDepth)
{
iterationComplete = true;
}
}
//timer().endGpuTimer();
#if GPU_TIMER == 1
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float t;
cudaEventElapsedTime(&t, start, stop);
totalTime += t;
if (timerStart && iter > 50) {
std::cout << " time per iteration is: " << totalTime / iter << " ms" <<std::endl;
timerStart = false;
}
#endif // GPU_TIMER
//printElapsedTime(timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
98b4e116b4ddf8349e136a85cf3c113e32ea693f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#define SIZE 1024
#define TILE_WIDTH 16
float h_M[SIZE*SIZE],h_N[SIZE*SIZE],h_P[SIZE*SIZE];
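// Tiled matrix multiplication: each block stages one TILE_WIDTH x TILE_WIDTH tile of M and N
// in shared memory per step, synchronizes, accumulates the partial dot product, then slides
// to the next tile. SIZE is assumed to be a multiple of TILE_WIDTH.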
__global__ void multiplication_kernel(float *d_M,float *d_N,float *d_P)
{
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int col = TILE_WIDTH * bx + tx;
int row = TILE_WIDTH * by + ty;
float prod_value = 0;
int m,k;
for(m=0;m<SIZE/TILE_WIDTH;m++)
{
ds_M[ty][tx] = d_M[row*SIZE+(m*TILE_WIDTH+tx)];
ds_N[ty][tx] = d_N[(m*TILE_WIDTH+ty)*SIZE+col];
__syncthreads();
for(k=0;k<TILE_WIDTH;k++)
prod_value+=ds_M[ty][k]*ds_N[k][tx];
__syncthreads();
}
d_P[row*SIZE+col] = prod_value;
}
void matrix_multiplication(float *d_M,float *d_N,float *d_P)
{
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
dim3 dimGrid(SIZE/TILE_WIDTH,SIZE/TILE_WIDTH,1);
hipLaunchKernelGGL(( multiplication_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_M,d_N,d_P);
}
void display_matrix(float mat[])
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
printf("%f ",mat[i*SIZE+j]);
printf("\n");
}
}
int main()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
hipSetDevice(deviceId);
const int ARRAY_BYTES = SIZE*SIZE*sizeof(float);
float *d_M,*d_N,*d_P;
hipMalloc((void**)&d_M,ARRAY_BYTES);
hipMalloc((void**)&d_N,ARRAY_BYTES);
hipMalloc((void**)&d_P,ARRAY_BYTES);
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
h_M[i*SIZE+j] = rand()%101;
h_N[i*SIZE+j] = rand()%101;
}
}
hipMemcpy(d_M,h_M,ARRAY_BYTES,hipMemcpyHostToDevice);
hipMemcpy(d_N,h_N,ARRAY_BYTES,hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
matrix_multiplication(d_M,d_N,d_P);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
hipMemcpy(h_P,d_P,ARRAY_BYTES,hipMemcpyDeviceToHost);
/*
printf("M is \n");
display_matrix(h_M);
printf("N is \n");
display_matrix(h_N);
printf("Product of M and N is \n");
display_matrix(h_P);
*/
printf("Elapsed time is %f\n",elapsedTime);
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
return 0;
} | 98b4e116b4ddf8349e136a85cf3c113e32ea693f.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#define SIZE 1024
#define TILE_WIDTH 16
float h_M[SIZE*SIZE],h_N[SIZE*SIZE],h_P[SIZE*SIZE];
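// Tiled matrix multiplication: each block stages one TILE_WIDTH x TILE_WIDTH tile of M and N
// in shared memory per step, synchronizes, accumulates the partial dot product, then slides
// to the next tile. SIZE is assumed to be a multiple of TILE_WIDTH.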
__global__ void multiplication_kernel(float *d_M,float *d_N,float *d_P)
{
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int col = TILE_WIDTH * bx + tx;
int row = TILE_WIDTH * by + ty;
float prod_value = 0;
int m,k;
for(m=0;m<SIZE/TILE_WIDTH;m++)
{
ds_M[ty][tx] = d_M[row*SIZE+(m*TILE_WIDTH+tx)];
ds_N[ty][tx] = d_N[(m*TILE_WIDTH+ty)*SIZE+col];
__syncthreads();
for(k=0;k<TILE_WIDTH;k++)
prod_value+=ds_M[ty][k]*ds_N[k][tx];
__syncthreads();
}
d_P[row*SIZE+col] = prod_value;
}
void matrix_multiplication(float *d_M,float *d_N,float *d_P)
{
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
dim3 dimGrid(SIZE/TILE_WIDTH,SIZE/TILE_WIDTH,1);
multiplication_kernel<<<dimGrid,dimBlock>>>(d_M,d_N,d_P);
}
void display_matrix(float mat[])
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
printf("%f ",mat[i*SIZE+j]);
printf("\n");
}
}
int main()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
cudaSetDevice(deviceId);
const int ARRAY_BYTES = SIZE*SIZE*sizeof(float);
float *d_M,*d_N,*d_P;
cudaMalloc((void**)&d_M,ARRAY_BYTES);
cudaMalloc((void**)&d_N,ARRAY_BYTES);
cudaMalloc((void**)&d_P,ARRAY_BYTES);
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
h_M[i*SIZE+j] = rand()%101;
h_N[i*SIZE+j] = rand()%101;
}
}
cudaMemcpy(d_M,h_M,ARRAY_BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,ARRAY_BYTES,cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
matrix_multiplication(d_M,d_N,d_P);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
cudaMemcpy(h_P,d_P,ARRAY_BYTES,cudaMemcpyDeviceToHost);
/*
printf("M is \n");
display_matrix(h_M);
printf("N is \n");
display_matrix(h_N);
printf("Product of M and N is \n");
display_matrix(h_P);
*/
printf("Elapsed time is %f\n",elapsedTime);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
return 0;
} |
b329ca39c2789c134435d2708b09c4d7e6aba8fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "cpu.h"
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
void scan(int n, int *odata, const int *idata) {
// TODO
//cuda event init
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
hipEventRecord(start);
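// Exclusive prefix sum: odata[0] = 0 and odata[i] = idata[0] + ... + idata[i-1].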
odata[0] = 0;
for (int i = 1; i<n; i++)
{
odata[i] = odata[i - 1] + idata[i - 1];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "cpu method: " << milliseconds << "ms" << std::endl;
}
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
// TODO
//iterate over the indata
int cur_index = 0;
for (int i = 0; i < n; i++)
{
if (idata[i]!=0)
{
odata[cur_index++] = idata[i];
}
}
return cur_index;
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
// TODO
int* idata_map = new int[n];
int* scan_sum = new int[n];
for (int i = 0; i<n; i++)
{
idata_map[i] = (idata[i] == 0) ? 0 : 1;
}
scan(n, scan_sum, idata_map);
int num_remain = scatter(n, odata, scan_sum, idata_map,idata);
return num_remain;
}
int scatter(int n, int *odata, const int *scan_sum, const int *idata_map, const int *idata)
{
int cur_num = 0;
for (int i = 0; i<n; i++)
{
if (idata_map[i] == 1)
{
odata[scan_sum[i]] = idata[i];
cur_num++;
}
}
return cur_num;
}
}
}
| b329ca39c2789c134435d2708b09c4d7e6aba8fc.cu | #include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include "cpu.h"
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
void scan(int n, int *odata, const int *idata) {
// TODO
//cuda event init
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start);
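// Exclusive prefix sum: odata[0] = 0 and odata[i] = idata[0] + ... + idata[i-1].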
odata[0] = 0;
for (int i = 1; i<n; i++)
{
odata[i] = odata[i - 1] + idata[i - 1];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "cpu method: " << milliseconds << "ms" << std::endl;
}
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
// TODO
//iterate over the indata
int cur_index = 0;
for (int i = 0; i < n; i++)
{
if (idata[i]!=0)
{
odata[cur_index++] = idata[i];
}
}
return cur_index;
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
// TODO
int* idata_map = new int[n];
int* scan_sum = new int[n];
for (int i = 0; i<n; i++)
{
idata_map[i] = (idata[i] == 0) ? 0 : 1;
}
scan(n, scan_sum, idata_map);
int num_remain = scatter(n, odata, scan_sum, idata_map,idata);
return num_remain;
}
int scatter(int n, int *odata, const int *scan_sum, const int *idata_map, const int *idata)
{
int cur_num = 0;
for (int i = 0; i<n; i++)
{
if (idata_map[i] == 1)
{
odata[scan_sum[i]] = idata[i];
cur_num++;
}
}
return cur_num;
}
}
}
|
3bccd92d19ce38a516172a879b2f4cd0cad1cd0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "device_launch_parameters.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (idata[index] != 0) {
bools[index] = 1;
}
else {
bools[index] = 0;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
| 3bccd92d19ce38a516172a879b2f4cd0cad1cd0f.cu | #include "common.h"
#include "device_launch_parameters.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (idata[index] != 0) {
bools[index] = 1;
}
else {
bools[index] = 0;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
|
590893fced22cd1c7919917a47ba574d47914216.hip | // !!! This is a file automatically generated by hipify!!!
/*
Fast Walsh–Hadamard transform algorithm
Copyright (c) 2013, Dmitry Protopopov
http://protopopov.ru
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "fwhtcuda.cuh"
| 590893fced22cd1c7919917a47ba574d47914216.cu | /*
Fast Walsh–Hadamard transform algorithm
Copyright (c) 2013, Dmitry Protopopov
http://protopopov.ru
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "fwhtcuda.cuh"
|
eba3e24047498f3b8f9d17ac2de34f9d8ffceb6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/conv_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/funcs/sparse/scatter.cu.h"
#include "paddle/phi/kernels/sparse/gpu/conv.cu.h"
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
#include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h"
#endif
#include "glog/logging.h"
namespace phi {
namespace sparse {
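// For each kernel offset with a non-empty rulebook segment, gather the input feature rows
// named by the rulebook, multiply them by that offset's (in_channels x out_channels) weight
// slice, and scatter-accumulate the result into the output values in one fused CUTLASS call.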
#define GATHER_GEMM_SCATTER(arch, input_type, x_nnz, kernel) \
({ \
const input_type* kernel_ptr = kernel.data<input_type>(); \
const input_type* x_nnz_ptr = x_nnz.data<input_type>(); \
for (int i = 0; i < kernel_size; i++) { \
if (h_counter_ptr[i] <= 0) { \
continue; \
} \
const int M = h_counter_ptr[i]; \
const int K = in_channels; \
const int N = out_channels; \
const input_type* tmp_kernel_ptr = kernel_ptr + i * K * N; \
const IntT* gather_indices = rulebook_ptr + h_offsets_ptr[i]; \
const IntT* scatter_indices = \
rulebook_ptr + rulebook_len + h_offsets_ptr[i]; \
const size_t key = autotune::GenKey(M / features_num_range, N, K); \
GatherGemmScatterDriver<arch, false, false>( \
dev_ctx, \
key, \
x_nnz_ptr, \
tmp_kernel_ptr, \
out_values_ptr, \
out_values_ptr, \
M, \
N, \
K, \
gather_indices, \
static_cast<const IntT*>(nullptr), \
scatter_indices, \
static_cast<T>(1.0), \
static_cast<T>(1.0), \
nullptr); \
} \
})
template <typename T, typename IntT>
void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* out,
DenseTensor* rulebook,
DenseTensor* counter) {
// update padding and dilation
// Currently, only x.layout == NDHWC and groups == 1 are supported
// if x.layout != NDHWC then transpose(x), transpose(weight)
const auto& x_dims = x.dims();
const auto& kernel_dims = kernel.dims();
const bool is2D = x_dims.size() == 4 ? true : false;
int kernel_size = is2D ? kernel_dims[0] * kernel_dims[1]
: kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
int rank = is2D ? 4 : 5;
std::vector<int> out_dims_vec(rank, 1);
DDim out_dims = make_ddim(out_dims_vec);
std::vector<int> kernel_sizes(kernel_dims.size());
for (int i = 0; i < kernel_dims.size(); i++) {
kernel_sizes[i] = kernel_dims[i];
}
std::vector<int> subm_paddings(paddings), subm_strides(strides);
if (subm) {
// the out shape of subm_conv is same as input shape
// reset the padding=kernel_size/2 and strides=1
phi::funcs::sparse::ResetSubmKernelSizeAndStrides(
kernel.dims(), &subm_paddings, &subm_strides);
}
phi::funcs::sparse::GetOutShape(
x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims);
const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3];
const int out_channels = is2D ? kernel_dims[3] : kernel_dims[4];
DenseTensor h_counter, h_offsets;
h_counter.Resize({kernel_size});
h_offsets.Resize({kernel_size + 1});
int* h_counter_ptr = dev_ctx.template HostAlloc<int>(&h_counter);
int* h_offsets_ptr = dev_ctx.template HostAlloc<int>(&h_offsets);
// Second algorithm:
// https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf
// 1. product rulebook
DenseTensor counter_per_kernel = phi::Empty<int>(dev_ctx, {kernel_size});
DenseTensor offsets_per_kernel = phi::Empty<int>(dev_ctx, {kernel_size});
DenseTensor out_index = phi::Empty<int>(dev_ctx, {1});
DenseTensor unique_value = phi::Empty<int>(dev_ctx, {1});
if (is2D) {
VLOG(6) << "call SubmConv2D or Conv2D " << subm << " and the key is "
<< key;
} else {
VLOG(6) << "call SubmConv3D or Conv3D " << subm << " and the key is "
<< key;
}
int rulebook_len = 0;
const IntT* rulebook_ptr = nullptr;
bool need_product_rulebook = true;
if (subm && !key.empty()) {
rulebook_ptr = phi::funcs::sparse::PrepareSubm<T, IntT, GPUContext>(
dev_ctx,
x,
key,
out_dims,
out,
h_counter.data<int>(),
h_offsets.data<int>(),
&rulebook_len,
&need_product_rulebook);
}
if (need_product_rulebook) {
DenseTensor tmp_rulebook;
rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
kernel_sizes,
subm_paddings,
dilations,
subm_strides,
out_dims,
subm,
&tmp_rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
h_counter_ptr,
h_offsets_ptr);
rulebook_ptr = tmp_rulebook.data<IntT>();
phi::funcs::sparse::SaveToTable(
dev_ctx, x, key, tmp_rulebook, h_counter, out, rulebook, counter);
}
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
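  // Use the CUTLASS gather-GEMM-scatter path only when the GPU architecture
  // and the channel alignment checked below allow it; otherwise fall back to
  // the explicit gather / GEMM / scatter path further down.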
bool mixed_precision = dev_ctx.GetComputeCapability() >= 75 &&
dev_ctx.GetComputeCapability() < 80 &&
std::is_same<T, float>::value;
bool cutlass = true;
if (dev_ctx.GetComputeCapability() < 75) cutlass = false;
if (in_channels % 8 != 0 || out_channels % 8 != 0) {
if (std::is_same<T, phi::dtype::float16>::value) cutlass = false;
if (mixed_precision) cutlass = false;
}
if (in_channels % 4 != 0 || out_channels % 4 != 0) {
if (std::is_same<T, float>::value) cutlass = false;
}
if (std::is_same<T, double>::value) cutlass = false;
if (!std::is_same<IntT, int32_t>::value) cutlass = false;
if (cutlass) {
auto* out_values = out->mutable_non_zero_elements();
T* out_values_ptr = out_values->data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
if (mixed_precision) {
DenseTensor kernel_fp16 =
phi::Cast<T, GPUContext>(dev_ctx, kernel, DataType::FLOAT16);
DenseTensor x_nnz_fp16 = phi::Cast<T, GPUContext>(
dev_ctx, x.non_zero_elements(), DataType::FLOAT16);
GATHER_GEMM_SCATTER(75, phi::dtype::float16, x_nnz_fp16, kernel_fp16);
} else {
if (dev_ctx.GetComputeCapability() < 80)
GATHER_GEMM_SCATTER(75, T, x.non_zero_elements(), kernel);
else
GATHER_GEMM_SCATTER(80, T, x.non_zero_elements(), kernel);
}
} else {
#endif
if (subm) {
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1);
unique_value.ResizeAndAllocate(
{static_cast<int>(out->nnz() * kernel_size)});
out_index.ResizeAndAllocate({static_cast<int>(rulebook_len)});
int* out_index_ptr = out_index.data<int>();
int* unique_value_ptr = unique_value.data<int>();
phi::backends::gpu::GpuMemsetAsync(
out_index_ptr, 0, sizeof(int) * rulebook_len, dev_ctx.stream());
hipLaunchKernelGGL(( GroupIndexs), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), rulebook_len,
kernel_size,
rulebook_ptr + rulebook_len,
out_index_ptr,
unique_value_ptr);
}
// 2. gather
phi::DenseTensor in_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor out_features =
phi::Empty<T>(dev_ctx, {rulebook_len, out_channels});
T* in_features_ptr = in_features.data<T>();
T* out_features_ptr = out_features.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &out_features, static_cast<T>(0.0f));
Gather<T, IntT>(dev_ctx,
x.values().data<T>(),
rulebook_ptr,
rulebook_len,
in_channels,
in_features_ptr);
    // 3. call gemm for every weight
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
auto* out_values = out->mutable_values();
T* out_values_ptr = out_values->data<T>();
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (h_counter_ptr[i] <= 0) {
continue;
}
// call gemm: (n, in_channels) * (in_channels, out_channels)
const int M = h_counter_ptr[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + h_offsets_ptr[i] * in_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * K * N;
T* tmp_out_ptr = out_features_ptr + h_offsets_ptr[i] * out_channels;
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1),
tmp_in_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_out_ptr);
}
// 4. scatter
phi::funcs::sparse::ScatterV2<T>(dev_ctx,
out_features_ptr,
out_index.data<int>(),
unique_value.data<int>(),
out->nnz(),
kernel_size,
out_channels,
1,
out_values_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
/**
* x: the input SparseCooTensor, shape is (N, D, H, W, C)
* kernel: the weight data, shape is (D, H, W, C, OC)
* out: the output SparseCooTensor, shape is (N, D, H, W, OC)
 * rulebook: return rulebook if key is not valid else return nullptr
 * counter: return counter if key is not valid else return nullptr
**/
template <typename T, typename Context>
void Conv3dCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* out,
DenseTensor* rulebook,
DenseTensor* counter) {
PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "Conv3dCooGPUKernel", ([&] {
Conv3dCooGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
dilations,
strides,
groups,
subm,
key,
out,
rulebook,
counter);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(conv3d_coo,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dCooKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
kernel->OutputAt(0).SetDataType(paddle::DataType::UNDEFINED);
kernel->OutputAt(1).SetDataType(paddle::DataType::INT32);
kernel->OutputAt(2).SetDataType(paddle::DataType::INT32);
}
| eba3e24047498f3b8f9d17ac2de34f9d8ffceb6e.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/conv_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/funcs/sparse/scatter.cu.h"
#include "paddle/phi/kernels/sparse/gpu/conv.cu.h"
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
#include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h"
#endif
#include "glog/logging.h"
namespace phi {
namespace sparse {
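// GATHER_GEMM_SCATTER: for each kernel offset whose rulebook segment is
// non-empty, gathers the corresponding input rows, runs a fused
// gather-GEMM-scatter (alpha = beta = 1) and accumulates into out_values.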
#define GATHER_GEMM_SCATTER(arch, input_type, x_nnz, kernel) \
({ \
const input_type* kernel_ptr = kernel.data<input_type>(); \
const input_type* x_nnz_ptr = x_nnz.data<input_type>(); \
for (int i = 0; i < kernel_size; i++) { \
if (h_counter_ptr[i] <= 0) { \
continue; \
} \
const int M = h_counter_ptr[i]; \
const int K = in_channels; \
const int N = out_channels; \
const input_type* tmp_kernel_ptr = kernel_ptr + i * K * N; \
const IntT* gather_indices = rulebook_ptr + h_offsets_ptr[i]; \
const IntT* scatter_indices = \
rulebook_ptr + rulebook_len + h_offsets_ptr[i]; \
const size_t key = autotune::GenKey(M / features_num_range, N, K); \
GatherGemmScatterDriver<arch, false, false>( \
dev_ctx, \
key, \
x_nnz_ptr, \
tmp_kernel_ptr, \
out_values_ptr, \
out_values_ptr, \
M, \
N, \
K, \
gather_indices, \
static_cast<const IntT*>(nullptr), \
scatter_indices, \
static_cast<T>(1.0), \
static_cast<T>(1.0), \
nullptr); \
} \
})
template <typename T, typename IntT>
void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* out,
DenseTensor* rulebook,
DenseTensor* counter) {
// update padding and dilation
// Currently, only support x.layout is NDHWC, groups = 1
// if x.layout != NDHWC then transpose(x), transpose(weight)
const auto& x_dims = x.dims();
const auto& kernel_dims = kernel.dims();
const bool is2D = x_dims.size() == 4 ? true : false;
int kernel_size = is2D ? kernel_dims[0] * kernel_dims[1]
: kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
int rank = is2D ? 4 : 5;
std::vector<int> out_dims_vec(rank, 1);
DDim out_dims = make_ddim(out_dims_vec);
std::vector<int> kernel_sizes(kernel_dims.size());
for (int i = 0; i < kernel_dims.size(); i++) {
kernel_sizes[i] = kernel_dims[i];
}
std::vector<int> subm_paddings(paddings), subm_strides(strides);
if (subm) {
// the out shape of subm_conv is same as input shape
// reset the padding=kernel_size/2 and strides=1
phi::funcs::sparse::ResetSubmKernelSizeAndStrides(
kernel.dims(), &subm_paddings, &subm_strides);
}
phi::funcs::sparse::GetOutShape(
x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims);
const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3];
const int out_channels = is2D ? kernel_dims[3] : kernel_dims[4];
DenseTensor h_counter, h_offsets;
h_counter.Resize({kernel_size});
h_offsets.Resize({kernel_size + 1});
int* h_counter_ptr = dev_ctx.template HostAlloc<int>(&h_counter);
int* h_offsets_ptr = dev_ctx.template HostAlloc<int>(&h_offsets);
// Second algorithm:
// https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf
// 1. product rulebook
DenseTensor counter_per_kernel = phi::Empty<int>(dev_ctx, {kernel_size});
DenseTensor offsets_per_kernel = phi::Empty<int>(dev_ctx, {kernel_size});
DenseTensor out_index = phi::Empty<int>(dev_ctx, {1});
DenseTensor unique_value = phi::Empty<int>(dev_ctx, {1});
if (is2D) {
VLOG(6) << "call SubmConv2D or Conv2D " << subm << " and the key is "
<< key;
} else {
VLOG(6) << "call SubmConv3D or Conv3D " << subm << " and the key is "
<< key;
}
int rulebook_len = 0;
const IntT* rulebook_ptr = nullptr;
bool need_product_rulebook = true;
if (subm && !key.empty()) {
rulebook_ptr = phi::funcs::sparse::PrepareSubm<T, IntT, GPUContext>(
dev_ctx,
x,
key,
out_dims,
out,
h_counter.data<int>(),
h_offsets.data<int>(),
&rulebook_len,
&need_product_rulebook);
}
if (need_product_rulebook) {
DenseTensor tmp_rulebook;
rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
kernel_sizes,
subm_paddings,
dilations,
subm_strides,
out_dims,
subm,
&tmp_rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
h_counter_ptr,
h_offsets_ptr);
rulebook_ptr = tmp_rulebook.data<IntT>();
phi::funcs::sparse::SaveToTable(
dev_ctx, x, key, tmp_rulebook, h_counter, out, rulebook, counter);
}
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
bool mixed_precision = dev_ctx.GetComputeCapability() >= 75 &&
dev_ctx.GetComputeCapability() < 80 &&
std::is_same<T, float>::value;
bool cutlass = true;
if (dev_ctx.GetComputeCapability() < 75) cutlass = false;
if (in_channels % 8 != 0 || out_channels % 8 != 0) {
if (std::is_same<T, phi::dtype::float16>::value) cutlass = false;
if (mixed_precision) cutlass = false;
}
if (in_channels % 4 != 0 || out_channels % 4 != 0) {
if (std::is_same<T, float>::value) cutlass = false;
}
if (std::is_same<T, double>::value) cutlass = false;
if (!std::is_same<IntT, int32_t>::value) cutlass = false;
if (cutlass) {
auto* out_values = out->mutable_non_zero_elements();
T* out_values_ptr = out_values->data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
if (mixed_precision) {
DenseTensor kernel_fp16 =
phi::Cast<T, GPUContext>(dev_ctx, kernel, DataType::FLOAT16);
DenseTensor x_nnz_fp16 = phi::Cast<T, GPUContext>(
dev_ctx, x.non_zero_elements(), DataType::FLOAT16);
GATHER_GEMM_SCATTER(75, phi::dtype::float16, x_nnz_fp16, kernel_fp16);
} else {
if (dev_ctx.GetComputeCapability() < 80)
GATHER_GEMM_SCATTER(75, T, x.non_zero_elements(), kernel);
else
GATHER_GEMM_SCATTER(80, T, x.non_zero_elements(), kernel);
}
} else {
#endif
if (subm) {
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1);
unique_value.ResizeAndAllocate(
{static_cast<int>(out->nnz() * kernel_size)});
out_index.ResizeAndAllocate({static_cast<int>(rulebook_len)});
int* out_index_ptr = out_index.data<int>();
int* unique_value_ptr = unique_value.data<int>();
phi::backends::gpu::GpuMemsetAsync(
out_index_ptr, 0, sizeof(int) * rulebook_len, dev_ctx.stream());
GroupIndexs<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(rulebook_len,
kernel_size,
rulebook_ptr + rulebook_len,
out_index_ptr,
unique_value_ptr);
}
// 2. gather
phi::DenseTensor in_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor out_features =
phi::Empty<T>(dev_ctx, {rulebook_len, out_channels});
T* in_features_ptr = in_features.data<T>();
T* out_features_ptr = out_features.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &out_features, static_cast<T>(0.0f));
Gather<T, IntT>(dev_ctx,
x.values().data<T>(),
rulebook_ptr,
rulebook_len,
in_channels,
in_features_ptr);
    // 3. call gemm for every weight
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
auto* out_values = out->mutable_values();
T* out_values_ptr = out_values->data<T>();
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (h_counter_ptr[i] <= 0) {
continue;
}
// call gemm: (n, in_channels) * (in_channels, out_channels)
const int M = h_counter_ptr[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + h_offsets_ptr[i] * in_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * K * N;
T* tmp_out_ptr = out_features_ptr + h_offsets_ptr[i] * out_channels;
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1),
tmp_in_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_out_ptr);
}
// 4. scatter
phi::funcs::sparse::ScatterV2<T>(dev_ctx,
out_features_ptr,
out_index.data<int>(),
unique_value.data<int>(),
out->nnz(),
kernel_size,
out_channels,
1,
out_values_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
/**
* x: the input SparseCooTensor, shape is (N, D, H, W, C)
* kernel: the weight data, shape is (D, H, W, C, OC)
* out: the output SparseCooTensor, shape is (N, D, H, W, OC)
 * rulebook: return rulebook if key is not valid else return nullptr
 * counter: return counter if key is not valid else return nullptr
**/
template <typename T, typename Context>
void Conv3dCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* out,
DenseTensor* rulebook,
DenseTensor* counter) {
PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "Conv3dCooGPUKernel", ([&] {
Conv3dCooGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
dilations,
strides,
groups,
subm,
key,
out,
rulebook,
counter);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(conv3d_coo,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dCooKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
kernel->OutputAt(0).SetDataType(paddle::DataType::UNDEFINED);
kernel->OutputAt(1).SetDataType(paddle::DataType::INT32);
kernel->OutputAt(2).SetDataType(paddle::DataType::INT32);
}
|
70000724973f0d8340bd0e94ce25fc4252c81c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y); //data + x + dims.x * y
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos)) //pos + dims.x * dims.y
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
// divUp(int total, int grain) { return (total + grain - 1) / grain; }
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
    hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume); // note: the grid here feels like it is really the block
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
        // for the specific meaning of this definition, see: https://blog.csdn.net/kelvin_yan/article/details/54019017
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
                float2 coo = proj(vc); // convert to pixel-frame coordinates
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
//======================================================================
//if(Dp>1.25)
//continue;
//=========================================================================
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist) //
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); //tsdf_prev*weight_prev+tsdf
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
}
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
    dists_tex.addressMode[2] = hipAddressModeBorder; // out-of-range accesses are replaced with zero; x, y, z directions, so 3 are set in total
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
    (void)binder; // binds the texture; a helper class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
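        // Trilinear interpolation of the TSDF at a fractional voxel
        // coordinate; returns NaN for samples outside the volume bounds.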
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
                // Note: origin of volume coordinate is placed
                // in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
                // Note: origin of volume coordinate is placed
                // in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (hipGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (hipGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
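        // Device-global counters shared by all blocks of the extraction kernel:
        // global_count accumulates the points appended so far, output_count is
        // the final count read back by the host, blocks_done counts finished blocks.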
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
            __shared__ int cta_buffer[CTA_SIZE]; // CTA_SIZE: the block size --
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
} //if (W != 0 && F != 1.f)
}
#if __CUDA_ARCH__ >= 200
                /// note: we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; //32*6*3
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; //32*3
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
                t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
hipLaunchKernelGGL(( extract_normals_kernel), dim3(grid), dim3(block), 0, 0, en, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 70000724973f0d8340bd0e94ce25fc4252c81c18.cu | #include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y); //data + x + dims.x * y
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos)) //pos + dims.x * dims.y
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
// divUp(int total, int grain) { return (total + grain - 1) / grain; }
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
    clear_volume_kernel<<<grid, block>>>(volume); // note: the grid here feels like it is really the block
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
        // for the specific meaning of this definition, see: https://blog.csdn.net/kelvin_yan/article/details/54019017
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
                    float2 coo = proj(vc); // convert to pixel-frame coordinates
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
//======================================================================
//if(Dp>1.25)
//continue;
//=========================================================================
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist) //
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); //tsdf_prev*weight_prev+tsdf
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
}
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
    dists_tex.addressMode[2] = cudaAddressModeBorder; // out-of-range accesses are replaced with zero; x, y, z directions, so 3 are set in total
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
    (void)binder; // binds the texture; a helper class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
                // Note: origin of volume coordinate is placed
                // in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placeed
// in the center of voxel (0,0,0), not in the corener of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (cudaGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (cudaGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
            __shared__ int cta_buffer[CTA_SIZE]; // CTA_SIZE: the block size --
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
} //if (W != 0 && F != 1.f)
}
#if __CUDA_ARCH__ >= 200
                /// note: we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; //32*6*3
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; //32*3
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
                t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
1862140741f1819b89f72d242f7e8e26a909b993.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cstdlib>
// #include <unistd.h>
// #include <windows.h>
// #include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// #include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
const int chunk_size = 1<<16;
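// Minimal buffered reader: pulls integers and fixed-point decimals (up to 6 fractional digits)
// straight out of a raw FILE buffer.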
namespace io_impl
{
inline bool maybe_digit(char c)
{
return c >= '0' && c <= '9';
}
struct io_s
{
private:
FILE *fin;
FILE *fout;
bool negative;
bool ok;
char ch;
inline char next_char()
{
static char buf[100000], *p1 = buf, *p2 = buf;
return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++;
}
public:
void init(FILE *_in, FILE *_out)
{
fin = _in;
fout = _out;
ch = next_char();
ok = true;
}
template <typename T>
bool run(T &_v)
{
_v = 0;
while (!maybe_digit(ch) && ch != EOF)
ch = next_char();
if (ch == EOF)
return ok = false;
do
{
_v = (_v << 1) + (_v << 3) + ch - '0';
} while (maybe_digit(ch = next_char()));
return true;
}
template <typename T>
bool rd(T &_v)
{
negative = false;
_v = 0;
while (!maybe_digit(ch) && ch != EOF)
{
negative = ch == '-';
ch = next_char();
}
if (ch == EOF)
return ok = false;
do
{
_v = (_v * 10) + (ch - '0');
} while (maybe_digit(ch = next_char()));
static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6};
if (ch == '.')
{
int tp = 0;
while (maybe_digit(ch = next_char()))
{
_v = (_v * 10) + (ch - '0');
++tp;
}
_v *= _map[tp];
}
if (negative)
_v = -_v;
return true;
}
};
} // namespace io_impl
using namespace io_impl;
io_s iokb;
namespace output {
const int OutputBufferSize = 1 << 20;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
// if (x<0) print('-'), x=-x;
// if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(LL x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld v) {
// printf("%.2f", x);
// // static int stk[70], tp;
// // tp = 0;
// if (v < 1e18) {
// if (fabs(v) < 0.005)
// {
// print('0');
// return;
// }
// else
// {
// LL x = (LL)floor(v * 100 + 0.5);
// if (x<0) print('-'), x=-x;
// // cerr << "x=" << x << endl; exit(0);
// print((LL)(x / 100));
// print('.');
// print((char)(x / 10 % 10 + '0'));
// print((char)(x % 10 + '0'));
// }
// } else {
static char buf[30];
sprintf(buf, "%.2lf", v);
print(buf);
// }
}
}
struct ios {
inline ios & operator >> (int &x){
iokb.run(x);
return *this;
}
inline ios &operator>>(ld &x)
{
iokb.rd(x);
return *this;
}
} io;
inline void handleCudaError(hipError_t err, string name = "fuck") {
if (err != hipSuccess) {
cerr << name << endl;
cerr << hipGetErrorString(err) << endl;
exit(0);
}
}
const int B = 32;
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
src = (ld*)malloc(size);
for (int i=0; i<n; ++i)
for (int j=0; j<m; ++j)
io >> src[i * m + j];
handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix");
}
void copyMatrixAsync(ld *&src, ld *&dst, int n, int m, hipStream_t &stream) {
int size = sizeof(ld) * n * m;
handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
handleCudaError(hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice, stream), "memcpyasync in copyMatrix");
}
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bm, j = index % bm;
if (i >= an || j >= bm) return;
register ld sum = 0;
int basea = i * am;
for (int k=0; k<am; ++k)
sum += d_a[basea + k] * d_b[k * bm + j];
d_c[i * bm + j] = sum;
// int index = threadIdx.x;
// if (index < an * bm)
// d_c[index] = 1;
}
void simk(int grids, int block_size, ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
for (int blockIdxx=0; blockIdxx<grids; ++blockIdxx) {
for (int threadIdxx=0; threadIdxx<block_size; ++threadIdxx) {
// printf("%d %d\n", blockIdxx, threadIdxx);
int blockid = blockIdxx,
threadid = threadIdxx;
int i = threadid / B, j = threadid % B, tbm = (bm + B - 1) / B, tam = (am + B - 1) / B;
int rowInA = blockid / tam * B + i;
int colInB = blockid % tbm * B + j;
// if (i == 1 && j == 0) puts("FUCK");
printf("blockid=%d, threadid=%d, i=%d, j=%d, rowInA=%d, colInB=%d, an=%d, bm=%d, block_size=%d, B=%d, am=%d\n", blockIdxx, threadIdxx, i, j, rowInA, colInB, an, bm, block_size, B, am);
if (rowInA < an && j < am) printf("fill a[%d][%d]\n", i, j);
if (i < am && colInB < bm) printf("fill b[%d][%d]\n", i, j);
if (rowInA < an && colInB < bm) printf("fill c[%d][%d]\n", rowInA, colInB);
}
}
// exit(0);
}
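// Tiled matrix multiply: each block of B*B threads computes a B x B tile of C, staging
// B x B tiles of A and B in shared memory and accumulating across the K dimension.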
__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
__shared__ ld a[B][B], b[B][B];
int blockid = blockIdx.x,
threadid = threadIdx.x;
int i = threadid / B, j = threadid % B;
int tbm = (bm + B - 1) / B;
int rowInA = blockid / tbm * B + i;
int colInB = blockid % tbm * B + j;
ld sum = 0;
for (int sub=0; sub<(am + B - 1) / B; ++sub) {
int x = rowInA, y = sub * B + j;
if (x < an && y < am)
a[i][j] = d_a[x * am + y];
else
a[i][j] = 0;
x = sub * B + i; y = colInB;
if (x < am && y < bm)
b[i][j] = d_b[x * bm + y];
else
b[i][j] = 0;
__syncthreads();
for (int k=0; k<B; ++k)
sum += a[i][k] * b[k][j];
__syncthreads();
}
if (rowInA < an && colInB < bm)
d_c[(rowInA) * bm + colInB] = sum;
}
void outputMatrix(ld *a, int n, int m) {
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
void outputinterval(ld *c, int l, int r) {
if (l == 0) {
output::print(c[l++]);
}
for (register int i=l; i<r; ++i) {
if (i % m == 0) output::print('\n');
else output::print(',');
output::print(c[i]);
}
}
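// Copies the result back in chunk_size pieces on two streams, overlapping each
// device-to-host copy with printing of the previously transferred chunk.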
void outputMatrixAsync(ld *&a, ld *&d_a, int n, int m) {
int st = 0, ed = n * m;
// printf("st=%d ed=%d, a=%p\n", st, ed, a);
hipStream_t stream[2];
int mask = 0;
hipStreamCreate(&stream[0]);
hipStreamCreate(&stream[1]);
int size;
for (; st<ed; st+=size, mask^=1) {
size = min(chunk_size, ed - st);
// printf("st=%d st+size=%d, mask=%d\n", st, st+size, mask);
// handleCudaError(hipMemcpy(a + st, d_a + st, size * sizeof(ld), hipMemcpyDeviceToHost));
handleCudaError(hipMemcpyAsync(a + st, d_a + st, size * sizeof(ld), hipMemcpyDeviceToHost, stream[mask]));
// exit(0);
if (st - chunk_size >= 0) {
// printf("%d %d\n",st-chunk_size, st);
handleCudaError(hipStreamSynchronize(stream[mask^1]));
outputinterval(a, st-chunk_size, st);
}
}
st -= size;
// sleep(1000);
handleCudaError(hipStreamSynchronize(stream[0]), "sync stream0 last");
handleCudaError(hipStreamSynchronize(stream[1]), "sync stream1 last");
outputinterval(a, st, ed);
output::print('\n');
}
void build(ld *&h, ld *&d, int n, int m, hipStream_t &s) {
handleCudaError(hipHostMalloc(&h, sizeof(ld) * n * m, hipHostMallocDefault));
for (int i=0; i<n; ++i) {
for (int j=0; j<m; ++j) {
io >> h[i * m + j];
}
}
copyMatrixAsync(h, d, n, m, s);
}
int main()
{
freopen("output.txt", "w", stdout);
iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w"));
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
cerr << prop.name << endl;
// hipStream_t mainstream;
// hipStreamCreate(&mainstream);
// #endif
io >> an >> am;
// build(h_a, d_a, an, am, mainstream);
copyMatrix(h_a, d_a, an, am);
io >> bn >> bm;
// build(h_b, d_b, bn, bm, mainstream);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(hipMalloc(&d_c, sizeof(ld) * an * bm), "allocate for d_c");
// handleCudaError(hipStreamSynchronize(mainstream));
int m = (an + B - 1) / B, n = (am + B - 1) / B, k = (bm + B - 1) / B;
// simk(m * k, B * B, d_a, d_b, d_c, an, bm, am);
fprintf(stderr, "stderr: m=%d, n=%d, k=%d\n", m, n, k);
hipLaunchKernelGGL(( matrixMult2), dim3(m * k), dim3(B * B), 0, 0, d_a, d_b, d_c, an, bm, am);
handleCudaError(hipGetLastError(), "kernel error");
fprintf(stderr, "stderr: running kernel completed\n");
h_c = (ld*)malloc(sizeof(ld) * an * bm);
// handleCudaError(hipHostMalloc(&h_c, sizeof(ld) * an * bm,hipHostMallocDefault), "hostalloc for c");
handleCudaError(hipMemcpy(h_c, d_c, sizeof(ld) * an * bm, hipMemcpyDeviceToHost), "mem back");
outputMatrix(h_c, an, bm);
output::flush();
return 0;
}
| 1862140741f1819b89f72d242f7e8e26a909b993.cu | #include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cstdlib>
// #include <unistd.h>
// #include <windows.h>
// #include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// #include <device_functions.h>
#include <cuda_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
const int chunk_size = 1<<16;
namespace io_impl
{
inline bool maybe_digit(char c)
{
return c >= '0' && c <= '9';
}
struct io_s
{
private:
FILE *fin;
FILE *fout;
bool negative;
bool ok;
char ch;
inline char next_char()
{
static char buf[100000], *p1 = buf, *p2 = buf;
return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++;
}
public:
void init(FILE *_in, FILE *_out)
{
fin = _in;
fout = _out;
ch = next_char();
ok = true;
}
template <typename T>
bool run(T &_v)
{
_v = 0;
while (!maybe_digit(ch) && ch != EOF)
ch = next_char();
if (ch == EOF)
return ok = false;
do
{
_v = (_v << 1) + (_v << 3) + ch - '0';
} while (maybe_digit(ch = next_char()));
return true;
}
template <typename T>
bool rd(T &_v)
{
negative = false;
_v = 0;
while (!maybe_digit(ch) && ch != EOF)
{
negative = ch == '-';
ch = next_char();
}
if (ch == EOF)
return ok = false;
do
{
_v = (_v * 10) + (ch - '0');
} while (maybe_digit(ch = next_char()));
static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6};
if (ch == '.')
{
int tp = 0;
while (maybe_digit(ch = next_char()))
{
_v = (_v * 10) + (ch - '0');
++tp;
}
_v *= _map[tp];
}
if (negative)
_v = -_v;
return true;
}
};
} // namespace io_impl
using namespace io_impl;
io_s iokb;
namespace output {
const int OutputBufferSize = 1 << 20;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
// if (x<0) print('-'), x=-x;
// if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(LL x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld v) {
// printf("%.2f", x);
// // static int stk[70], tp;
// // tp = 0;
// if (v < 1e18) {
// if (fabs(v) < 0.005)
// {
// print('0');
// return;
// }
// else
// {
// LL x = (LL)floor(v * 100 + 0.5);
// if (x<0) print('-'), x=-x;
// // cerr << "x=" << x << endl; exit(0);
// print((LL)(x / 100));
// print('.');
// print((char)(x / 10 % 10 + '0'));
// print((char)(x % 10 + '0'));
// }
// } else {
static char buf[30];
sprintf(buf, "%.2lf", v);
print(buf);
// }
}
}
struct ios {
inline ios & operator >> (int &x){
iokb.run(x);
return *this;
}
inline ios &operator>>(ld &x)
{
iokb.rd(x);
return *this;
}
} io;
inline void handleCudaError(cudaError_t err, string name = "fuck") {
if (err != cudaSuccess) {
cerr << name << endl;
cerr << cudaGetErrorString(err) << endl;
exit(0);
}
}
const int B = 32;
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
src = (ld*)malloc(size);
for (int i=0; i<n; ++i)
for (int j=0; j<m; ++j)
io >> src[i * m + j];
handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix");
}
void copyMatrixAsync(ld *&src, ld *&dst, int n, int m, cudaStream_t &stream) {
int size = sizeof(ld) * n * m;
handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
handleCudaError(cudaMemcpyAsync(dst, src, size, cudaMemcpyHostToDevice, stream), "memcpyasync in copyMatrix");
}
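// Naive kernel kept alongside the tiled version: one thread per output element, reading
// A and B directly from global memory (main() launches matrixMult2 instead).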
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bm, j = index % bm;
if (i >= an || j >= bm) return;
register ld sum = 0;
int basea = i * am;
for (int k=0; k<am; ++k)
sum += d_a[basea + k] * d_b[k * bm + j];
d_c[i * bm + j] = sum;
// int index = threadIdx.x;
// if (index < an * bm)
// d_c[index] = 1;
}
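// Host-side emulation of the tiled kernel's index arithmetic, used only for debugging the
// block/thread-to-tile mapping (the call in main() is commented out).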
void simk(int grids, int block_size, ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
for (int blockIdxx=0; blockIdxx<grids; ++blockIdxx) {
for (int threadIdxx=0; threadIdxx<block_size; ++threadIdxx) {
// printf("%d %d\n", blockIdxx, threadIdxx);
int blockid = blockIdxx,
threadid = threadIdxx;
int i = threadid / B, j = threadid % B, tbm = (bm + B - 1) / B, tam = (am + B - 1) / B;
int rowInA = blockid / tam * B + i;
int colInB = blockid % tbm * B + j;
// if (i == 1 && j == 0) puts("FUCK");
printf("blockid=%d, threadid=%d, i=%d, j=%d, rowInA=%d, colInB=%d, an=%d, bm=%d, block_size=%d, B=%d, am=%d\n", blockIdxx, threadIdxx, i, j, rowInA, colInB, an, bm, block_size, B, am);
if (rowInA < an && j < am) printf("fill a[%d][%d]\n", i, j);
if (i < am && colInB < bm) printf("fill b[%d][%d]\n", i, j);
if (rowInA < an && colInB < bm) printf("fill c[%d][%d]\n", rowInA, colInB);
}
}
// exit(0);
}
__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
__shared__ ld a[B][B], b[B][B];
int blockid = blockIdx.x,
threadid = threadIdx.x;
int i = threadid / B, j = threadid % B;
int tbm = (bm + B - 1) / B;
int rowInA = blockid / tbm * B + i;
int colInB = blockid % tbm * B + j;
ld sum = 0;
for (int sub=0; sub<(am + B - 1) / B; ++sub) {
int x = rowInA, y = sub * B + j;
if (x < an && y < am)
a[i][j] = d_a[x * am + y];
else
a[i][j] = 0;
x = sub * B + i; y = colInB;
if (x < am && y < bm)
b[i][j] = d_b[x * bm + y];
else
b[i][j] = 0;
__syncthreads();
for (int k=0; k<B; ++k)
sum += a[i][k] * b[k][j];
__syncthreads();
}
if (rowInA < an && colInB < bm)
d_c[(rowInA) * bm + colInB] = sum;
}
void outputMatrix(ld *a, int n, int m) {
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
void outputinterval(ld *c, int l, int r) {
if (l == 0) {
output::print(c[l++]);
}
for (register int i=l; i<r; ++i) {
if (i % m == 0) output::print('\n');
else output::print(',');
output::print(c[i]);
}
}
void outputMatrixAsync(ld *&a, ld *&d_a, int n, int m) {
int st = 0, ed = n * m;
// printf("st=%d ed=%d, a=%p\n", st, ed, a);
cudaStream_t stream[2];
int mask = 0;
cudaStreamCreate(&stream[0]);
cudaStreamCreate(&stream[1]);
int size;
for (; st<ed; st+=size, mask^=1) {
size = min(chunk_size, ed - st);
// printf("st=%d st+size=%d, mask=%d\n", st, st+size, mask);
// handleCudaError(cudaMemcpy(a + st, d_a + st, size * sizeof(ld), cudaMemcpyDeviceToHost));
handleCudaError(cudaMemcpyAsync(a + st, d_a + st, size * sizeof(ld), cudaMemcpyDeviceToHost, stream[mask]));
// exit(0);
if (st - chunk_size >= 0) {
// printf("%d %d\n",st-chunk_size, st);
handleCudaError(cudaStreamSynchronize(stream[mask^1]));
outputinterval(a, st-chunk_size, st);
}
}
st -= size;
// sleep(1000);
handleCudaError(cudaStreamSynchronize(stream[0]), "sync stream0 last");
handleCudaError(cudaStreamSynchronize(stream[1]), "sync stream1 last");
outputinterval(a, st, ed);
output::print('\n');
}
void build(ld *&h, ld *&d, int n, int m, cudaStream_t &s) {
handleCudaError(cudaHostAlloc(&h, sizeof(ld) * n * m, cudaHostAllocDefault));
for (int i=0; i<n; ++i) {
for (int j=0; j<m; ++j) {
io >> h[i * m + j];
}
}
copyMatrixAsync(h, d, n, m, s);
}
int main()
{
freopen("output.txt", "w", stdout);
iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w"));
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
cerr << prop.name << endl;
// cudaStream_t mainstream;
// cudaStreamCreate(&mainstream);
// #endif
io >> an >> am;
// build(h_a, d_a, an, am, mainstream);
copyMatrix(h_a, d_a, an, am);
io >> bn >> bm;
// build(h_b, d_b, bn, bm, mainstream);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(cudaMalloc(&d_c, sizeof(ld) * an * bm), "allocate for d_c");
// handleCudaError(cudaStreamSynchronize(mainstream));
int m = (an + B - 1) / B, n = (am + B - 1) / B, k = (bm + B - 1) / B;
// simk(m * k, B * B, d_a, d_b, d_c, an, bm, am);
fprintf(stderr, "stderr: m=%d, n=%d, k=%d\n", m, n, k);
matrixMult2<<<m * k, B * B>>>(d_a, d_b, d_c, an, bm, am);
handleCudaError(cudaGetLastError(), "kernel error");
fprintf(stderr, "stderr: running kernel completed\n");
h_c = (ld*)malloc(sizeof(ld) * an * bm);
// handleCudaError(cudaHostAlloc(&h_c, sizeof(ld) * an * bm,cudaHostAllocDefault), "hostalloc for c");
handleCudaError(cudaMemcpy(h_c, d_c, sizeof(ld) * an * bm, cudaMemcpyDeviceToHost), "mem back");
outputMatrix(h_c, an, bm);
output::flush();
return 0;
}
|
719d302877f300cb028888d06440960f723eaf7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "LeNet5_cuda.h"
void LeNet5_cuda::predict(int batch) {
// TODO: Implement conv1
// TODO: Implement relu
// TODO: Implement pool1
// TODO: Implement conv2
// TODO: Implement relu
// TODO: Implement pool2
// TODO: Implement fc1
// TODO: Implement relu
// TODO: Implement fc2
// TODO: Implement relu
// TODO: Implement fc3
/* NOTE: unless you want to make a major change to this class structure,
* you need to write your output to the device memory d_output
* so that classify() can handle the rest.
*/
}
void LeNet5_cuda::prepare_device_memory(uint8_t* image) {
// Alloc Model Parameters
hipMalloc((void**)&d_conv1_weight,
sizeof(double) * conv1_in_channel * conv1_out_channel *
conv1_kernel_size * conv1_kernel_size);
hipMalloc((void**)&d_conv1_bias, sizeof(double) * conv1_out_channel);
hipMalloc((void**)&d_conv2_weight,
sizeof(double) * conv2_in_channel * conv2_out_channel *
conv2_kernel_size * conv2_kernel_size);
hipMalloc((void**)&d_conv2_bias, sizeof(double) * conv2_out_channel);
hipMalloc((void**)&d_fc1_weight,
sizeof(double) * fc1_in_channel * fc1_out_channel);
hipMalloc((void**)&d_fc1_bias, sizeof(double) * fc1_out_channel);
hipMalloc((void**)&d_fc2_weight,
sizeof(double) * fc2_in_channel * fc2_out_channel);
hipMalloc((void**)&d_fc2_bias, sizeof(double) * fc2_out_channel);
hipMalloc((void**)&d_fc3_weight,
sizeof(double) * fc3_in_channel * fc3_out_channel);
hipMalloc((void**)&d_fc3_bias, sizeof(double) * fc3_out_channel);
// Alloc Activations
hipMalloc((void**)&d_image,
sizeof(uint8_t) * batch * input_size * input_size * input_channel);
hipMalloc((void**)&d_input,
sizeof(double) * batch * input_channel * input_size * input_size);
hipMalloc((void**)&d_C1_feature_map,
sizeof(double) * batch * C1_channel * C1_size * C1_size);
hipMalloc((void**)&d_S2_feature_map,
sizeof(double) * batch * S2_channel * S2_size * S2_size);
hipMalloc((void**)&d_C3_feature_map,
sizeof(double) * batch * C3_channel * C3_size * C3_size);
hipMalloc((void**)&d_S4_feature_map,
sizeof(double) * batch * S4_channel * S4_size * S4_size);
hipMalloc((void**)&d_C5_layer, sizeof(double) * batch * C5_size);
hipMalloc((void**)&d_F6_layer, sizeof(double) * batch * F6_size);
hipMalloc((void**)&d_output, sizeof(double) * batch * output_size);
// Copy Parameters
hipMemcpy(d_conv1_weight, conv1_weight,
sizeof(double) * conv1_in_channel * conv1_out_channel *
conv1_kernel_size * conv1_kernel_size,
hipMemcpyHostToDevice);
hipMemcpy(d_conv1_bias, conv1_bias, sizeof(double) * conv1_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_conv2_weight, conv2_weight,
sizeof(double) * conv2_in_channel * conv2_out_channel *
conv2_kernel_size * conv2_kernel_size,
hipMemcpyHostToDevice);
hipMemcpy(d_conv2_bias, conv2_bias, sizeof(double) * conv2_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc1_weight, fc1_weight,
sizeof(double) * fc1_in_channel * fc1_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc1_bias, fc1_bias, sizeof(double) * fc1_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc2_weight, fc2_weight,
sizeof(double) * fc2_in_channel * fc2_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc2_bias, fc2_bias, sizeof(double) * fc2_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc3_weight, fc3_weight,
sizeof(double) * fc3_in_channel * fc3_out_channel,
hipMemcpyHostToDevice);
hipMemcpy(d_fc3_bias, fc3_bias, sizeof(double) * fc3_out_channel,
hipMemcpyHostToDevice);
// copy input image
size_t image_size = batch * input_size * input_size * input_channel;
hipMemcpy(d_image, image, image_size * sizeof(uint8_t),
hipMemcpyHostToDevice);
}
void LeNet5_cuda::classify(int* predict, int batch) {
// read logits back to cpu
hipMemcpy(output, d_output, sizeof(double) * output_size * batch,
hipMemcpyDeviceToHost);
// Softmax
softmax(output, predict, batch, output_size);
}
LeNet5_cuda::~LeNet5_cuda() {
hipFree(d_conv1_weight);
hipFree(d_conv2_weight);
hipFree(d_conv1_bias);
hipFree(d_conv2_bias);
hipFree(d_fc1_weight);
hipFree(d_fc2_weight);
hipFree(d_fc3_weight);
hipFree(d_fc1_bias);
hipFree(d_fc2_bias);
hipFree(d_fc3_bias);
hipFree(d_image);
hipFree(d_input);
hipFree(d_C1_feature_map);
hipFree(d_S2_feature_map);
hipFree(d_C3_feature_map);
hipFree(d_S4_feature_map);
hipFree(d_C5_layer);
hipFree(d_F6_layer);
hipFree(d_output);
hipFree(d_predict_cuda);
}
| 719d302877f300cb028888d06440960f723eaf7f.cu | #include "LeNet5_cuda.h"
void LeNet5_cuda::predict(int batch) {
// TODO: Implement conv1
// TODO: Implement relu
// TODO: Implement pool1
// TODO: Implement conv2
// TODO: Implement relu
// TODO: Implement pool2
// TODO: Implement fc1
// TODO: Implement relu
// TODO: Implement fc2
// TODO: Implement relu
// TODO: Implement fc3
/* NOTE: unless you want to make a major change to this class structure,
* you need to write your output to the device memory d_output
* so that classify() can handle the rest.
*/
}
void LeNet5_cuda::prepare_device_memory(uint8_t* image) {
// Alloc Model Parameters
cudaMalloc((void**)&d_conv1_weight,
sizeof(double) * conv1_in_channel * conv1_out_channel *
conv1_kernel_size * conv1_kernel_size);
cudaMalloc((void**)&d_conv1_bias, sizeof(double) * conv1_out_channel);
cudaMalloc((void**)&d_conv2_weight,
sizeof(double) * conv2_in_channel * conv2_out_channel *
conv2_kernel_size * conv2_kernel_size);
cudaMalloc((void**)&d_conv2_bias, sizeof(double) * conv2_out_channel);
cudaMalloc((void**)&d_fc1_weight,
sizeof(double) * fc1_in_channel * fc1_out_channel);
cudaMalloc((void**)&d_fc1_bias, sizeof(double) * fc1_out_channel);
cudaMalloc((void**)&d_fc2_weight,
sizeof(double) * fc2_in_channel * fc2_out_channel);
cudaMalloc((void**)&d_fc2_bias, sizeof(double) * fc2_out_channel);
cudaMalloc((void**)&d_fc3_weight,
sizeof(double) * fc3_in_channel * fc3_out_channel);
cudaMalloc((void**)&d_fc3_bias, sizeof(double) * fc3_out_channel);
// Alloc Activations
cudaMalloc((void**)&d_image,
sizeof(uint8_t) * batch * input_size * input_size * input_channel);
cudaMalloc((void**)&d_input,
sizeof(double) * batch * input_channel * input_size * input_size);
cudaMalloc((void**)&d_C1_feature_map,
sizeof(double) * batch * C1_channel * C1_size * C1_size);
cudaMalloc((void**)&d_S2_feature_map,
sizeof(double) * batch * S2_channel * S2_size * S2_size);
cudaMalloc((void**)&d_C3_feature_map,
sizeof(double) * batch * C3_channel * C3_size * C3_size);
cudaMalloc((void**)&d_S4_feature_map,
sizeof(double) * batch * S4_channel * S4_size * S4_size);
cudaMalloc((void**)&d_C5_layer, sizeof(double) * batch * C5_size);
cudaMalloc((void**)&d_F6_layer, sizeof(double) * batch * F6_size);
cudaMalloc((void**)&d_output, sizeof(double) * batch * output_size);
// Copy Parameters
cudaMemcpy(d_conv1_weight, conv1_weight,
sizeof(double) * conv1_in_channel * conv1_out_channel *
conv1_kernel_size * conv1_kernel_size,
cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1_bias, conv1_bias, sizeof(double) * conv1_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_conv2_weight, conv2_weight,
sizeof(double) * conv2_in_channel * conv2_out_channel *
conv2_kernel_size * conv2_kernel_size,
cudaMemcpyHostToDevice);
cudaMemcpy(d_conv2_bias, conv2_bias, sizeof(double) * conv2_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc1_weight, fc1_weight,
sizeof(double) * fc1_in_channel * fc1_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc1_bias, fc1_bias, sizeof(double) * fc1_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc2_weight, fc2_weight,
sizeof(double) * fc2_in_channel * fc2_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc2_bias, fc2_bias, sizeof(double) * fc2_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc3_weight, fc3_weight,
sizeof(double) * fc3_in_channel * fc3_out_channel,
cudaMemcpyHostToDevice);
cudaMemcpy(d_fc3_bias, fc3_bias, sizeof(double) * fc3_out_channel,
cudaMemcpyHostToDevice);
// copy input image
size_t image_size = batch * input_size * input_size * input_channel;
cudaMemcpy(d_image, image, image_size * sizeof(uint8_t),
cudaMemcpyHostToDevice);
}
void LeNet5_cuda::classify(int* predict, int batch) {
// read logits back to cpu
cudaMemcpy(output, d_output, sizeof(double) * output_size * batch,
cudaMemcpyDeviceToHost);
// Softmax
softmax(output, predict, batch, output_size);
}
LeNet5_cuda::~LeNet5_cuda() {
cudaFree(d_conv1_weight);
cudaFree(d_conv2_weight);
cudaFree(d_conv1_bias);
cudaFree(d_conv2_bias);
cudaFree(d_fc1_weight);
cudaFree(d_fc2_weight);
cudaFree(d_fc3_weight);
cudaFree(d_fc1_bias);
cudaFree(d_fc2_bias);
cudaFree(d_fc3_bias);
cudaFree(d_image);
cudaFree(d_input);
cudaFree(d_C1_feature_map);
cudaFree(d_S2_feature_map);
cudaFree(d_C3_feature_map);
cudaFree(d_S4_feature_map);
cudaFree(d_C5_layer);
cudaFree(d_F6_layer);
cudaFree(d_output);
cudaFree(d_predict_cuda);
}
|
1cc19ddac20ec172d56111f6849c0ab6bddcde98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
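// Prints whether device-side assertions (DSA) were compiled in and whether the kernel-launch
// registry is active; used to diagnose a test that should have triggered an assertion but did not.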
void did_not_fail_diagnostics() {
#ifdef TORCH_USE_CUDA_DSA
std::cerr << "DSA was enabled" << std::endl;
#else
std::cerr << "DSA was not enabled" << std::endl;
#endif
std::cerr
<< "c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = "
<< c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().enabled
<< std::endl;
std::cerr
<< "c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().is_enabled() = "
<< c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().is_enabled()
<< std::endl;
std::cerr
<< "c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = "
<< c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref()
.do_all_devices_support_managed_memory
<< std::endl;
}
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void cuda_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
* TEST: Triggering device side assertion on a simple <<<1,1>>> config.
 * The kernel used takes only one variable as a parameter.
*/
void cuda_device_assertions_1_var_test() {
const auto stream = c10::hip::getStreamFromPoolMasqueradingAsCUDA();
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel,
1, /* Blocks */
1, /* Threads */
0, /* Shared mem */
stream, /* Stream */
1);
try {
c10::hip::device_synchronize();
did_not_fail_diagnostics();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str,
HasSubstr("CUDA device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_1_var_test) {
#ifdef TORCH_USE_CUDA_DSA
c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
std::cerr << "BEFORE TEST" << std::endl;
did_not_fail_diagnostics();
cuda_device_assertions_1_var_test();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled.";
#endif
}
| 1cc19ddac20ec172d56111f6849c0ab6bddcde98.cu | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
void did_not_fail_diagnostics() {
#ifdef TORCH_USE_CUDA_DSA
std::cerr << "DSA was enabled" << std::endl;
#else
std::cerr << "DSA was not enabled" << std::endl;
#endif
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled
<< std::endl;
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().is_enabled() = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().is_enabled()
<< std::endl;
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref()
.do_all_devices_support_managed_memory
<< std::endl;
}
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void cuda_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
* TEST: Triggering device side assertion on a simple <<<1,1>>> config.
 * The kernel used takes only one variable as a parameter.
*/
void cuda_device_assertions_1_var_test() {
const auto stream = c10::cuda::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel,
1, /* Blocks */
1, /* Threads */
0, /* Shared mem */
stream, /* Stream */
1);
try {
c10::cuda::device_synchronize();
did_not_fail_diagnostics();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str,
HasSubstr("CUDA device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_1_var_test) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
std::cerr << "BEFORE TEST" << std::endl;
did_not_fail_diagnostics();
cuda_device_assertions_1_var_test();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled.";
#endif
}
|
62840c6becd56fa1771bcb6e3c5a41037b77db99.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#define blocksize 128
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
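// One Hillis-Steele step: every element at index >= 2^(d-1) adds in the value 2^(d-1)
// positions to its left, yielding an inclusive scan after ilog2ceil(n) passes.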
__global__ void kernScanHelper(int n, int pow2dMinus1, int *in, int *out)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
// Inclusive Scan
if (index >= pow2dMinus1) {
out[index] = in[index - pow2dMinus1] + in[index];
}
else {
out[index] = in[index];
}
}
__global__ void inclusiveToExclusive(int n, int *in, int *out)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
out[index] = index > 0 ? in[index - 1] : 0;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
int *in;
int *out;
hipMalloc((void**)&in, n * sizeof(int));
hipMalloc((void**)&out, n * sizeof(int));
hipMemcpy(in, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
// TODO
for (int d = 1; d <= ilog2ceil(n); d++) {
int pow2dMinus1 = 1 << (d - 1); // 2^(d-1) without going through floating-point pow
kernScanHelper << <fullBlocksPerGrid, blocksize >> > (n, pow2dMinus1, in, out);
std::swap(in, out);
}
// Shift to the right
inclusiveToExclusive << < fullBlocksPerGrid, blocksize >> > (n, in, out);
timer().endGpuTimer();
// Copy final values into odata
hipMemcpy(odata, out, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(in);
hipFree(out);
}
}
}
| 62840c6becd56fa1771bcb6e3c5a41037b77db99.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#define blocksize 128
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__global__ void kernScanHelper(int n, int pow2dMinus1, int *in, int *out)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
// Inclusive Scan
if (index >= pow2dMinus1) {
out[index] = in[index - pow2dMinus1] + in[index];
}
else {
out[index] = in[index];
}
}
__global__ void inclusiveToExclusive(int n, int *in, int *out)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
out[index] = index > 0 ? in[index - 1] : 0;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
int *in;
int *out;
cudaMalloc((void**)&in, n * sizeof(int));
cudaMalloc((void**)&out, n * sizeof(int));
cudaMemcpy(in, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
// TODO
for (int d = 1; d <= ilog2ceil(n); d++) {
int pow2dMinus1 = pow(2, d - 1);
kernScanHelper << <fullBlocksPerGrid, blocksize >> > (n, pow2dMinus1, in, out);
std::swap(in, out);
}
// Shift to the right
inclusiveToExclusive << < fullBlocksPerGrid, blocksize >> > (n, in, out);
timer().endGpuTimer();
// Copy final values into odata
cudaMemcpy(odata, out, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(in);
cudaFree(out);
}
}
}
|
f085b4849cfaf12441e7d4b635aeecd8e15bc839.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
// Gather data from SOA
// Ensure coalesced global memory access
// Do not consider sorting for now. Use atomics instead.
// One particle per thread
template <int dim>
__global__ void P2G(TState<dim> state) {
// constexpr int scratch_size = 8;
//__shared__ real scratch[dim + 1][scratch_size][scratch_size][scratch_size];
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
real dt = state.dt;
auto x = state.get_x(part_id);
for (int i = 0; i < dim; i++) {
x[i] = max(2 * state.dx, min(x[i], (state.res[i] - 2) * state.dx));
}
state.set_x(part_id, x);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
TransferCommon<dim> tc(state, x);
auto A = state.get_A(part_id);
// Fixed corotated
auto P = PK1(state.mu, state.lambda, F) + F * A;
state.set_P(part_id, P);
auto stress = -state.invD * dt * state.V_p * P * transposed(F);
auto affine =
real(mpm_enalbe_force) * stress + real(mpm_enalbe_apic) * state.m_p * C;
#pragma unroll
for (int i = 0; i < kernel_volume<dim>(); i++) {
auto dpos = tc.dpos(i);
real contrib[dim + 1];
auto tmp = affine * dpos + state.m_p * v;
auto w = tc.w(i);
for (int d = 0; d < dim; d++) {
contrib[d] = tmp[d] * w;
}
contrib[dim] = state.m_p * w;
auto node = state.grid_node(tc.base_coord + offset_from_scalar<dim>(i));
for (int p = 0; p < dim + 1; p++) {
atomicAdd(&node[p], contrib[p]);
}
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
// printf("forward A[%d][%d] %f\n", i, j, A[i][j]);
}
}
}
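// Grid update: convert momentum to velocity, save it to the grid_star backup, apply gravity,
// then enforce the per-node boundary condition (coeff == -1 zeroes the velocity; a non-zero
// normal applies a friction-style projection).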
template <int dim>
__global__ void grid_forward(TState<dim> state) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
using Vector = TVector<real, dim>;
if (id < state.num_cells) {
auto node = state.grid_node(id);
auto v_i = Vector(node);
if (node[dim] > 0) {
real inv_m = 1.0f / node[dim];
v_i = inv_m * v_i;
auto grid_backup = state.grid_star_node(id);
for (int i = 0; i < dim; i++) {
grid_backup[i] = v_i[i];
}
for (int i = 0; i < dim; i++) {
v_i[i] += state.gravity[i] * state.dt;
}
auto bc = state.grid_node_bc(id);
auto normal = Vector(bc);
real coeff = bc[dim];
if (coeff == -1) {
v_i = Vector(0.0f);
} else if (normal.length2() > 0) {
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1 / lit) * vit;
auto litstar = max(lit + coeff * min(lin, 0.0f), 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
v_i = vistar;
}
for (int i = 0; i < dim; i++) {
node[i] = v_i[i];
}
}
}
}
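// G2P: gather grid velocities back to each particle, rebuild the APIC matrix C, advect the
// position, and update the deformation gradient F with (I + dt*C).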
template <int dim>
__global__ void G2P(TState<dim> state, TState<dim> next_state) {
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
real dt = state.dt;
auto x = state.get_x(part_id);
typename TState<dim>::Vector v;
auto F = state.get_F(part_id);
typename TState<dim>::Matrix C;
TransferCommon<dim> tc(state, x);
for (int i = 0; i < kernel_volume<dim>(); i++) {
auto dpos = tc.dpos(i);
auto node = state.grid_node(tc.base_coord + offset_from_scalar<dim>(i));
auto node_v = TState<dim>::Vector(node);
auto w = tc.w(i);
v = v + w * node_v;
C = C + TState<dim>::Matrix::outer_product(w * node_v, state.invD * dpos);
}
next_state.set_x(part_id, x + state.dt * v);
next_state.set_v(part_id, v);
next_state.set_F(part_id, (typename TState<dim>::Matrix(1) + dt * C) * F);
next_state.set_C(part_id, C);
}
template <int dim>
void advance(TState<dim> &state, TState<dim> &new_state) {
hipMemset(state.grid_storage, 0,
state.num_cells * (state.dim + 1) * sizeof(real));
int num_blocks =
(state.num_particles + particle_block_dim - 1) / particle_block_dim;
hipLaunchKernelGGL(( P2G<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, state);
hipLaunchKernelGGL(( grid_forward<dim>), dim3((state.grid_size() + grid_block_dim - 1) / grid_block_dim),
dim3(grid_block_dim), 0, 0, state);
hipLaunchKernelGGL(( G2P<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, state, new_state);
auto err = hipDeviceSynchronize();
if (err) {
printf("Launch: %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void inc(int n, const real *inx, real *outx) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < n) {
outx[id] = inx[id] + 1;
}
}
void IncKernelLauncher(const real *inx,
real *outx) {
int n = 256 * 256 * 1024;
int block_size = 1024;
hipLaunchKernelGGL(( inc), dim3(n / block_size), dim3(block_size), 0, 0, n, inx, outx);
}
// compatibility
void MPMKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC,
real *outP,
real *outgrid,
real *outgrid_star) {
if (dim_ == 3) {
constexpr int dim = 3;
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
instate->grid_bc = const_cast<real *>(ingrid);
instate->grid_star_storage = outgrid_star;
instate->set(V_p, m_p, E, nu);
auto outstate =
new TState<dim>(res, num_particles, dx, dt, gravity, outx, outv, outF,
outC, nullptr, nullptr, nullptr);
outstate->set(V_p, m_p, E, nu);
advance<dim>(*instate, *outstate);
} else {
constexpr int dim = 2;
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
instate->grid_bc = const_cast<real *>(ingrid);
instate->grid_star_storage = outgrid_star;
instate->set(V_p, m_p, E, nu);
auto outstate =
new TState<dim>(res, num_particles, dx, dt, gravity, outx, outv, outF,
outC, nullptr, nullptr, nullptr);
outstate->set(V_p, m_p, E, nu);
advance<dim>(*instate, *outstate);
}
}
void P2GKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
real *outP,
real *outgrid) {
if (dim_ == 3) {
constexpr int dim = 3;
auto state = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
hipMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
state->set(V_p, m_p, E, nu);
hipLaunchKernelGGL(( P2G<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, *state);
} else {
constexpr int dim = 2;
auto state = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
hipMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
state->set(V_p, m_p, E, nu);
hipLaunchKernelGGL(( P2G<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, *state);
}
}
/*
void G2PKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inP,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC) {
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inP, (real *)ingrid);
auto outstate = new TState<dim>(res, num_particles, dx, dt, gravity, outx,
outv, outF, outC, nullptr, nullptr);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
hipLaunchKernelGGL(( G2P<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, *instate, *outstate);
}
*/
template <int dim>
void initialize_mpm_state(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions) {
// State(int res[dim], int num_particles, real dx, real dt, real
auto state = new TState<dim>(res, num_particles, dx, dt, gravity);
state_ = state;
hipMemcpy(state->x_storage, initial_positions,
sizeof(TVector<real, dim>) * num_particles,
hipMemcpyHostToDevice);
}
template <int dim>
void forward_mpm_state(void *state_, void *new_state_) {
auto *state = reinterpret_cast<TState<dim> *>(state_);
auto *new_state = reinterpret_cast<TState<dim> *>(new_state_);
advance<dim>(*state, *new_state);
}
template void initialize_mpm_state<2>(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions);
template void initialize_mpm_state<3>(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions);
template void forward_mpm_state<2>(void *, void *);
template void forward_mpm_state<3>(void *, void *);
/*
constexpr int dim = 3;
void P2GKernelLauncher(int res[dim],
int num_particles,
real dx,
real dt,
real gravity[dim],
const real *inx,
const real *inv,
const real *inF,
const real *inC,
real *outP,
real *outgrid) {
auto state =
new TState<dim>(res, num_particles, dx, dt, gravity, (real *)inx,
(real *)inv, (real *)inF, (real *)inC, outP, outgrid);
hipMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
hipLaunchKernelGGL(( P2G<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, *state);
}
void G2PKernelLauncher(int res[dim],
int num_particles,
real dx,
real dt,
real gravity[dim],
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inP,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC) {
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inP, (real *)ingrid);
auto outstate = new TState<dim>(res, num_particles, dx, dt, gravity, outx,
outv, outF, outC, nullptr, nullptr);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
hipLaunchKernelGGL(( G2P<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, *instate, *outstate);
}
*/
| f085b4849cfaf12441e7d4b635aeecd8e15bc839.cu | #include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
// Gather data from SOA
// Ensure coalesced global memory access
// Do not consider sorting for now. Use atomics instead.
// One particle per thread
template <int dim>
__global__ void P2G(TState<dim> state) {
// constexpr int scratch_size = 8;
//__shared__ real scratch[dim + 1][scratch_size][scratch_size][scratch_size];
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
real dt = state.dt;
auto x = state.get_x(part_id);
for (int i = 0; i < dim; i++) {
x[i] = max(2 * state.dx, min(x[i], (state.res[i] - 2) * state.dx));
}
state.set_x(part_id, x);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
TransferCommon<dim> tc(state, x);
auto A = state.get_A(part_id);
// Fixed corotated
auto P = PK1(state.mu, state.lambda, F) + F * A;
state.set_P(part_id, P);
auto stress = -state.invD * dt * state.V_p * P * transposed(F);
auto affine =
real(mpm_enalbe_force) * stress + real(mpm_enalbe_apic) * state.m_p * C;
#pragma unroll
for (int i = 0; i < kernel_volume<dim>(); i++) {
auto dpos = tc.dpos(i);
real contrib[dim + 1];
auto tmp = affine * dpos + state.m_p * v;
auto w = tc.w(i);
for (int d = 0; d < dim; d++) {
contrib[d] = tmp[d] * w;
}
contrib[dim] = state.m_p * w;
auto node = state.grid_node(tc.base_coord + offset_from_scalar<dim>(i));
for (int p = 0; p < dim + 1; p++) {
atomicAdd(&node[p], contrib[p]);
}
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
// printf("forward A[%d][%d] %f\n", i, j, A[i][j]);
}
}
}
template <int dim>
__global__ void grid_forward(TState<dim> state) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
using Vector = TVector<real, dim>;
if (id < state.num_cells) {
auto node = state.grid_node(id);
auto v_i = Vector(node);
if (node[dim] > 0) {
real inv_m = 1.0f / node[dim];
v_i = inv_m * v_i;
auto grid_backup = state.grid_star_node(id);
for (int i = 0; i < dim; i++) {
grid_backup[i] = v_i[i];
}
for (int i = 0; i < dim; i++) {
v_i[i] += state.gravity[i] * state.dt;
}
auto bc = state.grid_node_bc(id);
auto normal = Vector(bc);
real coeff = bc[dim];
if (coeff == -1) {
v_i = Vector(0.0f);
} else if (normal.length2() > 0) {
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1 / lit) * vit;
auto litstar = max(lit + coeff * min(lin, 0.0f), 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
v_i = vistar;
}
for (int i = 0; i < dim; i++) {
node[i] = v_i[i];
}
}
}
}
template <int dim>
__global__ void G2P(TState<dim> state, TState<dim> next_state) {
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
real dt = state.dt;
auto x = state.get_x(part_id);
typename TState<dim>::Vector v;
auto F = state.get_F(part_id);
typename TState<dim>::Matrix C;
TransferCommon<dim> tc(state, x);
for (int i = 0; i < kernel_volume<dim>(); i++) {
auto dpos = tc.dpos(i);
auto node = state.grid_node(tc.base_coord + offset_from_scalar<dim>(i));
auto node_v = TState<dim>::Vector(node);
auto w = tc.w(i);
v = v + w * node_v;
C = C + TState<dim>::Matrix::outer_product(w * node_v, state.invD * dpos);
}
next_state.set_x(part_id, x + state.dt * v);
next_state.set_v(part_id, v);
next_state.set_F(part_id, (typename TState<dim>::Matrix(1) + dt * C) * F);
next_state.set_C(part_id, C);
}
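// One MPM substep: clear the grid buffers, then run the P2G scatter, the grid update,
// and the G2P gather in sequence.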
template <int dim>
void advance(TState<dim> &state, TState<dim> &new_state) {
cudaMemset(state.grid_storage, 0,
state.num_cells * (state.dim + 1) * sizeof(real));
int num_blocks =
(state.num_particles + particle_block_dim - 1) / particle_block_dim;
P2G<dim><<<num_blocks, particle_block_dim>>>(state);
grid_forward<dim><<<(state.grid_size() + grid_block_dim - 1) / grid_block_dim,
grid_block_dim>>>(state);
G2P<dim><<<num_blocks, particle_block_dim>>>(state, new_state);
auto err = cudaThreadSynchronize();
if (err) {
printf("Launch: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void inc(int n, const real *inx, real *outx) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < n) {
outx[id] = inx[id] + 1;
}
}
void IncKernelLauncher(const real *inx,
real *outx) {
int n = 256 * 256 * 1024;
int block_size = 1024;
inc<<<n / block_size, block_size>>>(n, inx, outx);
}
// compatibility
void MPMKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC,
real *outP,
real *outgrid,
real *outgrid_star) {
if (dim_ == 3) {
constexpr int dim = 3;
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
instate->grid_bc = const_cast<real *>(ingrid);
instate->grid_star_storage = outgrid_star;
instate->set(V_p, m_p, E, nu);
auto outstate =
new TState<dim>(res, num_particles, dx, dt, gravity, outx, outv, outF,
outC, nullptr, nullptr, nullptr);
outstate->set(V_p, m_p, E, nu);
advance<dim>(*instate, *outstate);
} else {
constexpr int dim = 2;
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
instate->grid_bc = const_cast<real *>(ingrid);
instate->grid_star_storage = outgrid_star;
instate->set(V_p, m_p, E, nu);
auto outstate =
new TState<dim>(res, num_particles, dx, dt, gravity, outx, outv, outF,
outC, nullptr, nullptr, nullptr);
outstate->set(V_p, m_p, E, nu);
advance<dim>(*instate, *outstate);
}
}
void P2GKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
real *outP,
real *outgrid) {
if (dim_ == 3) {
constexpr int dim = 3;
auto state = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
cudaMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
state->set(V_p, m_p, E, nu);
P2G<dim><<<num_blocks, particle_block_dim>>>(*state);
} else {
constexpr int dim = 2;
auto state = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inA, outP, outgrid);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
cudaMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
state->set(V_p, m_p, E, nu);
P2G<dim><<<num_blocks, particle_block_dim>>>(*state);
}
}
/*
void G2PKernelLauncher(int dim_,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inP,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC) {
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inP, (real *)ingrid);
auto outstate = new TState<dim>(res, num_particles, dx, dt, gravity, outx,
outv, outF, outC, nullptr, nullptr);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
G2P<dim><<<num_blocks, particle_block_dim>>>(*instate, *outstate);
}
*/
template <int dim>
void initialize_mpm_state(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions) {
// State(int res[dim], int num_particles, real dx, real dt, real
auto state = new TState<dim>(res, num_particles, dx, dt, gravity);
state_ = state;
cudaMemcpy(state->x_storage, initial_positions,
sizeof(TVector<real, dim>) * num_particles,
cudaMemcpyHostToDevice);
}
template <int dim>
void forward_mpm_state(void *state_, void *new_state_) {
auto *state = reinterpret_cast<TState<dim> *>(state_);
auto *new_state = reinterpret_cast<TState<dim> *>(new_state_);
advance<dim>(*state, *new_state);
}
template void initialize_mpm_state<2>(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions);
template void initialize_mpm_state<3>(int *res,
int num_particles,
float *gravity,
void *&state_,
float dx,
float dt,
float *initial_positions);
template void forward_mpm_state<2>(void *, void *);
template void forward_mpm_state<3>(void *, void *);
/*
constexpr int dim = 3;
void P2GKernelLauncher(int res[dim],
int num_particles,
real dx,
real dt,
real gravity[dim],
const real *inx,
const real *inv,
const real *inF,
const real *inC,
real *outP,
real *outgrid) {
auto state =
new TState<dim>(res, num_particles, dx, dt, gravity, (real *)inx,
(real *)inv, (real *)inF, (real *)inC, outP, outgrid);
cudaMemset(outgrid, 0, state->num_cells * (dim + 1) * sizeof(real));
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
P2G<dim><<<num_blocks, particle_block_dim>>>(*state);
}
void G2PKernelLauncher(int res[dim],
int num_particles,
real dx,
real dt,
real gravity[dim],
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inP,
const real *ingrid,
real *outx,
real *outv,
real *outF,
real *outC) {
auto instate = new TState<dim>(res, num_particles, dx, dt, gravity,
(real *)inx, (real *)inv, (real *)inF,
(real *)inC, (real *)inP, (real *)ingrid);
auto outstate = new TState<dim>(res, num_particles, dx, dt, gravity, outx,
outv, outF, outC, nullptr, nullptr);
int num_blocks =
(num_particles + particle_block_dim - 1) / particle_block_dim;
G2P<dim><<<num_blocks, particle_block_dim>>>(*instate, *outstate);
}
*/
|
689dfaf568677af47981514a2290ed063c4de78a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Reduce the derivatives computed in the N^2 energy kernel, and compute all per-particle energy terms.
*/
extern "C" __global__ void computePerParticleEnergy(long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq
PARAMETER_ARGUMENTS) {
real energy = 0;
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Load the derivatives
LOAD_DERIVATIVES
// Now calculate the per-particle energy terms.
real4 pos = posq[index];
real3 force = make_real3(0, 0, 0);
COMPUTE_ENERGY
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
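/**
 * Hedged illustration (assumption, not part of the generated file): each
 * thread accumulates its partial result into its own energyBuffer slot, so
 * the total energy is recovered by summing the buffer after the launch. The
 * helper below is a host-side sketch; bufferSize would equal the number of
 * threads launched (gridDim.x * blockDim.x).
 */
static inline double sumPerParticleEnergy(const real* hostEnergyBuffer, int bufferSize) {
    double total = 0;
    for (int i = 0; i < bufferSize; i++)
        total += hostEnergyBuffer[i];
    return total;
}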
| 689dfaf568677af47981514a2290ed063c4de78a.cu | /**
* Reduce the derivatives computed in the N^2 energy kernel, and compute all per-particle energy terms.
*/
extern "C" __global__ void computePerParticleEnergy(long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq
PARAMETER_ARGUMENTS) {
real energy = 0;
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Load the derivatives
LOAD_DERIVATIVES
// Now calculate the per-particle energy terms.
real4 pos = posq[index];
real3 force = make_real3(0, 0, 0);
COMPUTE_ENERGY
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
|
67957db5e72491f92410e78afa8211525eec93e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <dlfcn.h>
#include <fcntl.h>
#include <gelf.h>
#include <map>
#include <setjmp.h>
#include <string>
#include <string.h>
#include <sstream>
#include <vector>
#include <unistd.h>
#define ASYNCIO_BUFFER_LENGTH (1024 * 1024 * 16)
#define ASYNCIO_DEFAULT_UNIT -1
#define ASYNCIO_DEFAULT_FORMAT -1
#define ASYNCIO_UNFORMATTED -2
using namespace std;
enum kind_t
{
TRANSACTION_TYPE_UNKNOWN,
TRANSACTION_TYPE_READ,
TRANSACTION_TYPE_WRITE
};
struct transaction_t
{
int unit;
int format;
void* func;
int nitems;
int offset;
char* buffer;
int* iostat;
kind_t kind;
};
enum type_t
{
READ_INT,
READ_INT_1D,
READ_INT_2D,
READ_LONG_LONG,
READ_FLOAT,
READ_FLOAT_1D,
READ_DOUBLE,
READ_DOUBLE_1D,
READ_DOUBLE_2D,
READ_DOUBLE_3D,
READ_DOUBLE_4D,
READ_BOOLEAN,
READ_BOOLEAN_1D,
READ_CHAR,
READ_CHAR_1D,
READ_CHAR_2D,
WRITE_INT,
WRITE_INT_1D,
WRITE_INT_2D,
WRITE_LONG_LONG,
WRITE_FLOAT,
WRITE_FLOAT_1D,
WRITE_DOUBLE,
WRITE_DOUBLE_1D,
WRITE_DOUBLE_2D,
WRITE_DOUBLE_3D,
WRITE_DOUBLE_4D,
WRITE_BOOLEAN,
WRITE_BOOLEAN_1D,
WRITE_CHAR,
WRITE_CHAR_1D,
WRITE_CHAR_2D
};
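// Hedged illustration (not part of the original file): every item recorded by
// the asyncio_read_*/asyncio_write_* routines below is serialized into the
// byte buffer as a type_t tag followed by its payload, which is exactly how
// st_callback() walks the buffer on the host. For example, a WRITE_DOUBLE_1D
// record occupies the size computed by this helper.
static inline size_t asyncio_write_double_1d_record_size(int dim_1)
{
    return sizeof(type_t) + sizeof(int) + sizeof(double) * (size_t)dim_1;
}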
#ifdef __HIPCC__
#define DEVICE __device__
#define NAMESPACE gpu
#else
#define DEVICE
#define NAMESPACE cpu
#define trap() exit(1)
#endif
namespace NAMESPACE
{
DEVICE bool asyncio_error = false;
DEVICE char asyncio_buffer[ASYNCIO_BUFFER_LENGTH];
DEVICE size_t asyncio_buffer_length = 0;
DEVICE char* asyncio_pbuffer = NULL;
DEVICE transaction_t* t_curr = NULL;
DEVICE int t_curr_nitems = 0;
}
using namespace NAMESPACE;
// On GPU all I/O routines work with thread 0 only.
extern "C" DEVICE void asyncio_begin_default_unit_default_format_c(char kind, char unit, char format, int* iostat)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (unit != '*')
{
printf("ASYNCIO ERROR: Invalid unit specifier: %c\n", unit);
asyncio_error = true;
trap();
}
if (format != '*')
{
printf("ASYNCIO ERROR: Invalid format specifier: %c\n", format);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = ASYNCIO_DEFAULT_UNIT;
t.format = ASYNCIO_DEFAULT_FORMAT;
#ifdef __HIPCC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_default_format_c(char kind, int unit, char format, int* iostat)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (format != '*')
{
printf("ASYNCIO ERROR: Invalid format specifier: %c\n", format);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = ASYNCIO_DEFAULT_FORMAT;
#ifdef __HIPCC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_unformatted_c(char kind, int unit, int* iostat)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = ASYNCIO_UNFORMATTED;
#ifdef __HIPCC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_default_unit_formatted_c(char kind, char unit, void* func, int format, int* iostat)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (unit != '*')
{
printf("ASYNCIO ERROR: Invalid unit specifier: %c\n", unit);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = ASYNCIO_DEFAULT_UNIT;
t.format = format;
t.func = func;
#ifdef __HIPCC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_formatted_c(char kind, int unit, void* func, int format, int* iostat)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = format;
t.func = func;
#ifdef __HIPCC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_read_integer_c(int* val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_c(int val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
*(int*)asyncio_pbuffer = val;
asyncio_pbuffer += sizeof(int);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_long_long_c(long long* val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_LONG_LONG;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(long long*));
asyncio_pbuffer += sizeof(long long*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_long_long_c(long long val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_LONG_LONG;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(long long));
asyncio_pbuffer += sizeof(long long);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_float_c(float* val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_FLOAT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(float*));
asyncio_pbuffer += sizeof(float*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_float_c(float val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_FLOAT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(float));
asyncio_pbuffer += sizeof(float);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_c(double* val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_c(double val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(double));
asyncio_pbuffer += sizeof(double);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_logical_c(bool* val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_BOOLEAN;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(bool*));
asyncio_pbuffer += sizeof(bool*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_logical_c(bool val)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_BOOLEAN;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(bool));
asyncio_pbuffer += sizeof(bool);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_char_c(char* val, int length)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_CHAR;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &length, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(char*));
asyncio_pbuffer += sizeof(char*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_char_c(char* val, int length)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_CHAR;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &length, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(char) * length);
asyncio_pbuffer += sizeof(char) * length;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_char_array1d_c(char** val, int dim_1, int* lengths)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_write_char_array1d_c(char** val, int dim_1, int* lengths)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_read_char_array2d_c(char** val, int dim_1, int dim_2, int* lengths)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_write_char_array2d_c(char** val, int dim_1, int dim_2, int* lengths)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_read_logical_array1d_c(bool* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_BOOLEAN_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(bool*));
asyncio_pbuffer += sizeof(bool*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_logical_array1d_c(bool* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_BOOLEAN_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(bool) * dim_1);
asyncio_pbuffer += sizeof(bool) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_integer_array1d_c(int* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_array1d_c(int* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(int) * dim_1);
asyncio_pbuffer += sizeof(int) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_integer_array2d_c(int* val, int dim_1, int dim_2)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_array2d_c(int* val, int dim_1, int dim_2)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(int) * dim_1 * dim_2);
asyncio_pbuffer += sizeof(int) * dim_1 * dim_2;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_float_array1d_c(float* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_FLOAT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(float*));
asyncio_pbuffer += sizeof(float*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_float_array1d_c(float* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_FLOAT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(float) * dim_1);
asyncio_pbuffer += sizeof(float) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array1d_c(double* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array1d_c(double* val, int dim_1)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1);
asyncio_pbuffer += sizeof(double) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array2d_c(double* val, int dim_1, int dim_2)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array2d_c(double* val, int dim_1, int dim_2)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array3d_c(double* val, int dim_1, int dim_2, int dim_3)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_3D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array3d_c(double* val, int dim_1, int dim_2, int dim_3)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_3D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2 * dim_3);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2 * dim_3;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array4d_c(double* val, int dim_1, int dim_2, int dim_3, int dim_4)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_4D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_4, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array4d_c(double* val, int dim_1, int dim_2, int dim_3, int dim_4)
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_4D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_4, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2 * dim_3 * dim_4);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2 * dim_3 * dim_4;
t_curr_nitems++;
}
extern "C" void asyncio_flush();
extern "C" DEVICE void asyncio_end()
{
#ifdef __HIPCC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to end without an active transaction\n");
asyncio_error = true;
trap();
}
memcpy(&t_curr->nitems, &t_curr_nitems, sizeof(int));
t_curr = NULL;
// Save the current buffer length.
asyncio_buffer_length = (size_t)asyncio_pbuffer - (size_t)asyncio_buffer;
#ifndef __HIPCC__
// On host we can flush each individual write statement.
asyncio_flush();
#endif
}
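// Hedged illustration (hypothetical kernel, not part of the original
// library): a device-side client emits one transaction per output statement,
// i.e. the rough equivalent of Fortran's  write(*,*) i, x  issued from
// thread 0. Only the list-directed default-unit write path is exercised here.
#ifdef __HIPCC__
__global__ void asyncio_example_kernel()
{
    int iostat = 0;
    int i = 42;
    double x = 3.14;
    asyncio_begin_default_unit_default_format_c('w', '*', '*', &iostat);
    asyncio_write_integer_c(i);
    asyncio_write_double_c(x);
    asyncio_end();
}
#endif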
#define CUDA_ERR_CHECK(x) \
do { hipError_t err = x; \
if (err != hipSuccess) { \
printf("CUDA error %d \"%s\" at %s:%d\n", \
(int)err, hipGetErrorString(err), \
__FILE__, __LINE__); exit(1); \
}} while (0);
struct st_parameter_dt;
extern "C" void asyncio_hook_read_default_unit_default_format(int*);
extern "C" void asyncio_hook_read_default_unit_formatted(size_t, char*, int*);
extern "C" void asyncio_hook_read_unit_unformatted(int, int*);
extern "C" void asyncio_hook_read_unit_default_format(int, int*);
extern "C" void asyncio_hook_read_unit_formatted(int, size_t, char*, int*);
extern "C" void asyncio_hook_write_default_unit_default_format(int*);
extern "C" void asyncio_hook_write_default_unit_formatted(size_t, char*, int*);
extern "C" void asyncio_hook_write_unit_unformatted(int, int*);
extern "C" void asyncio_hook_write_unit_default_format(int, int*);
extern "C" void asyncio_hook_write_unit_formatted(int, size_t, char*, int*);
extern "C" void asyncio_hook_read_integer_array_1d(void*, int);
extern "C" void asyncio_hook_read_integer_array_2d(void*, int, int);
extern "C" void asyncio_hook_read_float_array_1d(void*, int);
extern "C" void asyncio_hook_read_double_array_1d(void*, int);
extern "C" void asyncio_hook_read_double_array_2d(void*, int, int);
extern "C" void asyncio_hook_read_double_array_3d(void*, int, int, int);
extern "C" void asyncio_hook_read_double_array_4d(void*, int, int, int, int);
extern "C" void asyncio_hook_read_boolean_array_1d(void*, int);
extern "C" void asyncio_hook_write_integer_array_1d(void*, int);
extern "C" void asyncio_hook_write_integer_array_2d(void*, int, int);
extern "C" void asyncio_hook_write_float_array_1d(void*, int);
extern "C" void asyncio_hook_write_double_array_1d(void*, int);
extern "C" void asyncio_hook_write_double_array_2d(void*, int, int);
extern "C" void asyncio_hook_write_double_array_3d(void*, int, int, int);
extern "C" void asyncio_hook_write_double_array_4d(void*, int, int, int, int);
extern "C" void asyncio_hook_write_boolean_array_1d(void*, int);
static bool inside_hook = false;
static bool inside_hook_array = false;
static jmp_buf get_st_parameter_jmp;
typedef void (*st_callback_t)(transaction_t*, st_parameter_dt*);
static st_callback_t callback;
static transaction_t* transaction;
static st_parameter_dt* st_parameter = NULL;
#ifdef DYNAMIC
#define LIBGFORTRAN "libgfortran.so.3"
static void* libgfortran = NULL;
#define bind_lib(lib) \
if (!libgfortran) \
{ \
libgfortran = dlopen(lib, RTLD_NOW | RTLD_GLOBAL); \
if (!libgfortran) \
{ \
fprintf(stderr, "Error loading %s: %s\n", lib, dlerror()); \
abort(); \
} \
}
#define bind_sym(handle, sym, retty, ...) \
typedef retty (*sym##_func_t)(__VA_ARGS__); \
static sym##_func_t sym##_real = NULL; \
if (!sym##_real) \
{ \
sym##_real = (sym##_func_t)dlsym(handle, #sym); \
if (!sym##_real) \
{ \
fprintf(stderr, "Error loading %s: %s\n", #sym, dlerror()); \
abort(); \
} \
}
extern "C" void _gfortran_st_read(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_read(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_read(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_read
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_read, void, st_parameter_dt*);
_gfortran_st_read_real(stp);
#else
__real__gfortran_st_read(stp);
#endif
if (inside_hook)
{
st_parameter = stp;
callback(transaction, stp);
longjmp(get_st_parameter_jmp, 1);
}
}
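// Hedged note on the build (assumption, not taken from this file): unless
// DYNAMIC is defined, the __wrap_/__real_ pairs in this section rely on GNU
// ld symbol wrapping, i.e. a link line roughly of the form
//   g++ ... -Wl,--wrap=_gfortran_st_read -Wl,--wrap=_gfortran_st_read_done \
//           -Wl,--wrap=_gfortran_st_write -Wl,--wrap=_gfortran_st_write_done \
//           -Wl,--wrap=_gfortran_transfer_array -Wl,--wrap=_gfortran_transfer_array_write ...
// With -DDYNAMIC the same entry points are instead defined here directly and
// forwarded to libgfortran.so.3 at run time through dlopen/dlsym.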
extern "C" void _gfortran_st_read_done(st_parameter_dt * stp);
#ifdef DYNAMIC
extern "C" void _gfortran_st_read_done(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_read_done(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_read_done(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_read_done
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_read_done, void, st_parameter_dt*);
_gfortran_st_read_done_real(stp);
#else
__real__gfortran_st_read_done(stp);
#endif
}
#ifdef DYNAMIC
extern "C" void _gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#else
extern "C" void __real__gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen);
extern "C" void __wrap__gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#endif
{
// In case of array writing hook, we use our own st_parameter
if (inside_hook_array)
stp = st_parameter;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_transfer_array, void, st_parameter_dt*, void*, int, size_t);
_gfortran_transfer_array_real(stp, desc, kind, charlen);
#else
__real__gfortran_transfer_array(stp, desc, kind, charlen);
#endif
}
extern "C" void _gfortran_transfer_integer_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character_write(st_parameter_dt *, void *, int);
#ifdef DYNAMIC
extern "C" void _gfortran_st_write(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_write(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_write(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_write
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_write, void, st_parameter_dt*);
_gfortran_st_write_real(stp);
#else
__real__gfortran_st_write(stp);
#endif
if (inside_hook)
{
st_parameter = stp;
callback(transaction, stp);
longjmp(get_st_parameter_jmp, 1);
}
}
extern "C" void _gfortran_st_write_done(st_parameter_dt * stp);
#ifdef DYNAMIC
extern "C" void _gfortran_st_write_done(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_write_done(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_write_done(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_write_done
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_write_done, void, st_parameter_dt*);
_gfortran_st_write_done_real(stp);
#else
__real__gfortran_st_write_done(stp);
#endif
}
#ifdef DYNAMIC
extern "C" void _gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#else
extern "C" void __real__gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen);
extern "C" void __wrap__gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#endif
{
// In case of array writing hook, we use our own st_parameter
if (inside_hook_array)
stp = st_parameter;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_transfer_array_write, void, st_parameter_dt*, void*, int, size_t);
_gfortran_transfer_array_write_real(stp, desc, kind, charlen);
#else
__real__gfortran_transfer_array_write(stp, desc, kind, charlen);
#endif
}
extern "C" void _gfortran_transfer_integer(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_integer_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character_write(st_parameter_dt *, void *, int);
static map<void*, string> funcs;
static map<void*, string>* pfuncs = NULL;
static map<string, void*> formats;
static bool funcs_resolved = false;
static void st_callback(transaction_t* t, st_parameter_dt* st_parameter)
{
for (int i = 0, e = t->nitems; i != e; i++)
{
type_t type = *(type_t*)(t->buffer + t->offset);
t->offset += sizeof(type_t);
void* value = (void*)(t->buffer + t->offset);
switch (type)
{
case READ_INT :
_gfortran_transfer_integer(st_parameter, *(void**)value, sizeof(int));
t->offset += sizeof(int*);
break;
case WRITE_INT :
_gfortran_transfer_integer_write(st_parameter, value, sizeof(int));
t->offset += sizeof(int);
break;
case READ_INT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(int*);
}
break;
case WRITE_INT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(int) * length[0];
}
break;
case READ_INT_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(int*);
}
break;
case WRITE_INT_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(int) * length[0] * length[1];
}
break;
case READ_LONG_LONG :
_gfortran_transfer_integer(st_parameter, *(void**)value, sizeof(long long));
t->offset += sizeof(long long*);
break;
case WRITE_LONG_LONG :
_gfortran_transfer_integer_write(st_parameter, value, sizeof(long long));
t->offset += sizeof(long long);
break;
case READ_FLOAT :
_gfortran_transfer_real(st_parameter, *(void**)value, sizeof(float));
t->offset += sizeof(float*);
break;
case WRITE_FLOAT :
_gfortran_transfer_real_write(st_parameter, value, sizeof(float));
t->offset += sizeof(float);
break;
case READ_FLOAT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_float_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(float*);
}
break;
case WRITE_FLOAT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_float_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(float) * length[0];
}
break;
case READ_DOUBLE :
_gfortran_transfer_real(st_parameter, *(void**)value, sizeof(double));
t->offset += sizeof(double*);
break;
case WRITE_DOUBLE :
_gfortran_transfer_real_write(st_parameter, value, sizeof(double));
t->offset += sizeof(double);
break;
case READ_DOUBLE_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0];
}
break;
case READ_DOUBLE_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1];
}
break;
case READ_DOUBLE_3D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 3;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_3d(value, length[0], length[1], length[2]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_3D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 3;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_3d(value, length[0], length[1], length[2]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1] * length[2];
}
break;
case READ_DOUBLE_4D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 4;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_4d(value, length[0], length[1], length[2], length[3]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_4D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 4;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_4d(value, length[0], length[1], length[2], length[3]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1] * length[2] * length[3];
}
break;
case READ_BOOLEAN :
_gfortran_transfer_logical(st_parameter, *(void**)value, sizeof(bool));
t->offset += sizeof(bool*);
break;
case WRITE_BOOLEAN :
_gfortran_transfer_logical_write(st_parameter, value, sizeof(bool));
t->offset += sizeof(bool);
break;
case READ_BOOLEAN_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_boolean_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(bool*);
}
break;
case WRITE_BOOLEAN_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_boolean_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(bool) * length[0];
}
break;
case READ_CHAR :
{
int length = *(int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
_gfortran_transfer_character(st_parameter, value, sizeof(char) * length);
t->offset += sizeof(char*);
}
break;
case WRITE_CHAR :
{
int length = *(int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
_gfortran_transfer_character_write(st_parameter, value, sizeof(char) * length);
t->offset += sizeof(char) * length;
}
break;
case READ_CHAR_1D :
case WRITE_CHAR_1D :
case READ_CHAR_2D :
case WRITE_CHAR_2D :
fprintf(stderr, "ASYNCIO ERROR: not implemented\n");
exit(1);
break;
default :
fprintf(stderr, "ASYNCIO ERROR: Unknown data type %d\n", type);
exit(1);
}
}
if (t->kind == TRANSACTION_TYPE_READ)
_gfortran_st_read_done(st_parameter);
else
_gfortran_st_write_done(st_parameter);
}
#ifdef __HIPCC__
static char* get_format(void* func, int format)
{
if (!funcs_resolved)
{
// 1) Resolve device functions addresses.
for (map<void*, string>::iterator i = pfuncs->begin(), e = pfuncs->end(); i != e; i++)
{
void* gpuAddress = NULL;
CUDA_ERR_CHECK(hipGetSymbolAddress(&gpuAddress, (const void*)i->first));
funcs[gpuAddress] = i->second;
}
delete pfuncs;
pfuncs = &funcs;
// 2) Find addresses of all __GPUFMT_* variables in host executable.
int fd = -1;
Elf *e = NULL;
try
{
if (elf_version(EV_CURRENT) == EV_NONE)
{
fprintf(stderr, "Cannot initialize ELF library: %s\n",
elf_errmsg(-1));
throw;
}
if ((fd = open("/proc/self/exe", O_RDONLY)) < 0)
{
fprintf(stderr, "Cannot open self executable\n");
throw;
}
if ((e = elf_begin(fd, ELF_C_READ, e)) == 0) {
fprintf(stderr, "Cannot read ELF image: %s\n", elf_errmsg(-1));
throw;
}
size_t shstrndx;
if (elf_getshdrstrndx(e, &shstrndx)) {
fprintf(stderr, "elf_getshdrstrndx() failed: %s\n", elf_errmsg(-1));
throw;
}
// Locate the symbol table.
Elf_Scn* scn = elf_nextscn(e, NULL);
for (int i = 1; scn != NULL; scn = elf_nextscn(e, scn), i++)
{
// Get section header.
GElf_Shdr shdr;
if (!gelf_getshdr(scn, &shdr))
{
fprintf(stderr, "gelf_getshdr() failed: %s\n", elf_errmsg(-1));
throw;
}
// If section is not a symbol table:
if (shdr.sh_type != SHT_SYMTAB) continue;
// Load symbols.
Elf_Data* data = elf_getdata(scn, NULL);
if (!data)
{
fprintf(stderr, "Expected data section for SYMTAB\n");
throw;
}
if (shdr.sh_size && !shdr.sh_entsize)
{
fprintf(stderr, "Cannot get the number of symbols\n");
throw;
}
int nsymbols = 0;
if (shdr.sh_size)
nsymbols = shdr.sh_size / shdr.sh_entsize;
int strndx = shdr.sh_link;
for (int i = 0; i < nsymbols; i++)
{
GElf_Sym sym;
if (!gelf_getsym(data, i, &sym))
{
fprintf(stderr, "gelf_getsym() failed: %s\n", elf_errmsg(-1));
throw;
}
char* name = elf_strptr(e, strndx, sym.st_name);
if (!name)
{
fprintf(stderr, "Cannot get the name of %d-th symbol: %s\n", i, elf_errmsg(-1));
throw;
}
if (!strncmp(name, "__GPUFMT_", strlen("__GPUFMT_")))
{
name += strlen("__GPUFMT_");
char* format = (char*)(size_t)sym.st_value;
// Perform basic format checks:
// 1) last character is null
// 2) first and last non-null characters are brackets
if (format[sym.st_size - 1] != '\0')
{
bool null_found = false;
for (int i = sym.st_size - 1; i >= 0; i--)
if (format[i] == '\0')
{
null_found = true;
break;
}
if (!null_found)
{
fprintf(stderr, "Error: format string \"%s\" is not terminated by c_null_char\n",
name);
exit(1);
}
else
{
fprintf(stderr, "Warning: format string \"%s\" has extra symbols after c_null_char",
name);
fprintf(stderr, " length should be %zu instead of %zu\n",
strlen(format) + 1, (size_t)sym.st_size);
}
}
if (format[0] != '(')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must start with '(' symbol\n",
name, format);
exit(1);
}
if (format[strlen(format) - 1] != ')')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must finish with ')' symbol\n",
name, format);
exit(1);
}
// This symbol is a valid format string - record it.
formats[name] = (void*)format;
}
}
elf_end(e);
close(fd);
e = NULL;
funcs_resolved = true;
break;
}
if (!funcs_resolved)
{
fprintf(stderr, "Cannot locate the symbol table of executable\n");
throw;
}
}
catch (...)
{
if (e)
elf_end(e);
if (fd >= 0)
close(fd);
exit(1);
}
}
map<void*, string>::iterator i = funcs.find((void*)func);
if (i == funcs.end())
{
fprintf(stderr, "ASYNCIO ERROR: Unknown function @ %p\n", (void*)func);
exit(1);
}
stringstream svarname;
svarname << i->second << "_" << format;
string varname = svarname.str();
map<string, void*>::iterator j = formats.find(varname);
if (j == formats.end())
{
fprintf(stderr, "ASYNCIO ERROR: Undefined format spec \"%s\"\n", varname.c_str());
exit(1);
}
char* result = (char*)j->second;
return result;
}
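// Hedged illustration (assumption about the code generator, not part of this
// file): get_format() above expects, for every kernel routine that prints
// with an explicit format, a device-side anchor symbol named __FUNC_<name>
// (registered through the __cudaRegisterVar wrapper below) plus a host-side
// character array named __GPUFMT_<name>_<n> holding the n-th format string.
// For a hypothetical routine "mysub", the generated declarations would look
// roughly like:
//   __device__ int __FUNC_mysub;               // address anchor, value unused
//   char __GPUFMT_mysub_1[] = "(i4,1x,f8.3)";  // format #1, parenthesized, null-terminated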
extern "C" void CUDARTAPI __real___cudaRegisterVar(
void **fatCubinHandle,
char *hostVar,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global
);
extern "C" void CUDARTAPI __wrap___cudaRegisterVar(
void **fatCubinHandle,
char *hostVar,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global
)
{
// Workaround in case the static funcs map happens to be
// initialized later.
if (!pfuncs)
pfuncs = new map<void*, string>();
__real___cudaRegisterVar(
fatCubinHandle,
hostVar,
deviceAddress,
deviceName,
ext,
size,
constant,
global);
if (strncmp(deviceName, "__FUNC_", strlen("__FUNC_")))
return;
// This symbol is a function name anchor - record it.
string& name = pfuncs->operator[]((void*)hostVar);
name = deviceAddress + strlen("__FUNC_");
}
#else
static char* get_format(void* func, int format)
{
if (!funcs_resolved)
{
// 1) Find addresses of all __GPUFMT_* variables in host executable.
int fd = -1;
Elf *e = NULL;
try
{
if (elf_version(EV_CURRENT) == EV_NONE)
{
fprintf(stderr, "Cannot initialize ELF library: %s\n",
elf_errmsg(-1));
throw;
}
if ((fd = open("/proc/self/exe", O_RDONLY)) < 0)
{
fprintf(stderr, "Cannot open self executable\n");
throw;
}
if ((e = elf_begin(fd, ELF_C_READ, e)) == 0) {
fprintf(stderr, "Cannot read ELF image: %s\n", elf_errmsg(-1));
throw;
}
size_t shstrndx;
if (elf_getshdrstrndx(e, &shstrndx)) {
fprintf(stderr, "elf_getshdrstrndx() failed: %s\n", elf_errmsg(-1));
throw;
}
// Locate the symbol table.
Elf_Scn* scn = elf_nextscn(e, NULL);
for (int i = 1; scn != NULL; scn = elf_nextscn(e, scn), i++)
{
// Get section header.
GElf_Shdr shdr;
if (!gelf_getshdr(scn, &shdr))
{
fprintf(stderr, "gelf_getshdr() failed: %s\n", elf_errmsg(-1));
throw;
}
// If section is not a symbol table:
if (shdr.sh_type != SHT_SYMTAB) continue;
// Load symbols.
Elf_Data* data = elf_getdata(scn, NULL);
if (!data)
{
fprintf(stderr, "Expected data section for SYMTAB\n");
throw;
}
if (shdr.sh_size && !shdr.sh_entsize)
{
fprintf(stderr, "Cannot get the number of symbols\n");
throw;
}
int nsymbols = 0;
if (shdr.sh_size)
nsymbols = shdr.sh_size / shdr.sh_entsize;
int strndx = shdr.sh_link;
for (int i = 0; i < nsymbols; i++)
{
GElf_Sym sym;
if (!gelf_getsym(data, i, &sym))
{
fprintf(stderr, "gelf_getsym() failed: %s\n", elf_errmsg(-1));
throw;
}
char* name = elf_strptr(e, strndx, sym.st_name);
if (!name)
{
fprintf(stderr, "Cannot get the name of %d-th symbol: %s\n", i, elf_errmsg(-1));
throw;
}
if (!strncmp(name, "__FUNC_", strlen("__FUNC_")))
{
// This symbol is a function name anchor - record it.
name += strlen("__FUNC_");
funcs[(void*)(size_t)sym.st_value] = name;
}
if (!strncmp(name, "__GPUFMT_", strlen("__GPUFMT_")))
{
name += strlen("__GPUFMT_");
char* format = (char*)(size_t)sym.st_value;
// Perform basic format checks:
// 1) last character is null
// 2) first and last non-null characters are brackets
if (format[sym.st_size - 1] != '\0')
{
bool null_found = false;
for (int i = sym.st_size - 1; i >= 0; i--)
if (format[i] == '\0')
{
null_found = true;
break;
}
if (!null_found)
{
fprintf(stderr, "Error: format string \"%s\" is not terminated by c_null_char\n",
name);
exit(1);
}
else
{
fprintf(stderr, "Warning: format string \"%s\" has extra symbols after c_null_char",
name);
fprintf(stderr, " length should be %zu instead of %zu\n",
strlen(format) + 1, (size_t)sym.st_size);
}
}
if (format[0] != '(')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must start with '(' symbol\n",
name, format);
exit(1);
}
if (format[strlen(format) - 1] != ')')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must finish with ')' symbol\n",
name, format);
exit(1);
}
#if 0
// Additional format checks to ensure no default
// forms of "i" and "d" formats are used (by some
// reason, gfortran does not support them).
for (int i = 0, e = strlen(format) - 2; i != e; i++)
{
if (((format[i] == ',') || (format[i] == '(')) &&
((format[i + 1] == 'i') || (format[i + 1] == 'd')))
{
if ((format[i + 2] >= '0') && (format[i + 2] <= '9'))
continue;
fprintf(stderr, "Error: malformed integer format in \"%s\" = \"%s\"\n",
name, format);
exit(1);
}
}
#endif
// This symbol is a valid format string - record it.
formats[name] = (void*)format;
}
}
elf_end(e);
close(fd);
e = NULL;
funcs_resolved = true;
break;
}
if (!funcs_resolved)
{
fprintf(stderr, "Cannot locate the symbol table of executable\n");
throw;
}
}
catch (...)
{
if (e)
elf_end(e);
if (fd >= 0)
close(fd);
exit(1);
}
}
map<void*, string>::iterator i = funcs.find((void*)func);
if (i == funcs.end())
{
fprintf(stderr, "ASYNCIO ERROR: Unknown function @ %p\n", (void*)func);
exit(1);
}
stringstream svarname;
svarname << i->second << "_" << format;
string varname = svarname.str();
map<string, void*>::iterator j = formats.find(varname);
if (j == formats.end())
{
fprintf(stderr, "ASYNCIO ERROR: Undefined format spec \"%s\"\n", varname.c_str());
exit(1);
}
char* result = (char*)j->second;
return result;
}
#endif // __HIPCC__
extern "C" void asyncio_flush()
{
#ifdef __HIPCC__
// Transfer asyncio error status.
static bool* pdevice_error = NULL;
if (!pdevice_error)
CUDA_ERR_CHECK(hipGetSymbolAddress((void**)&pdevice_error, asyncio_error));
bool host_error = true;
CUDA_ERR_CHECK(hipMemcpy(&host_error, pdevice_error, sizeof(bool),
hipMemcpyDeviceToHost));
// Do nothing, if error status is true.
if (host_error) return;
// Transfer asyncio buffer length.
static size_t* pdevice_length = NULL;
if (!pdevice_length)
CUDA_ERR_CHECK(hipGetSymbolAddress((void**)&pdevice_length, asyncio_buffer_length));
size_t host_length = 0;
CUDA_ERR_CHECK(hipMemcpy(&host_length, pdevice_length, sizeof(size_t),
hipMemcpyDeviceToHost));
// Do nothing, if buffer length is zero.
if (host_length == 0)
{
CUDA_ERR_CHECK(hipMemset(pdevice_error, 0, sizeof(bool)));
return;
}
// Transfer asyncio buffer contents.
static char* pdevice_buffer = NULL;
if (!pdevice_buffer)
CUDA_ERR_CHECK(hipGetSymbolAddress((void**)&pdevice_buffer, asyncio_buffer));
vector<char> vhost_buffer;
vhost_buffer.resize(host_length);
char* host_buffer = &vhost_buffer[0];
CUDA_ERR_CHECK(hipMemcpy(host_buffer, pdevice_buffer, host_length,
hipMemcpyDeviceToHost));
for (int offset = 0; offset < host_length; )
#else
// Do nothing, if error status is true.
if (asyncio_error)
{
asyncio_error = false;
return;
}
for (int offset = 0; offset < asyncio_buffer_length; )
#endif
{
#ifdef __HIPCC__
transaction_t* t = (transaction_t*)(host_buffer + offset);
#else
transaction_t* t = (transaction_t*)(asyncio_buffer + offset);
#endif
offset += sizeof(transaction_t);
t->offset = offset;
#ifdef __HIPCC__
t->buffer = host_buffer;
#else
t->buffer = asyncio_buffer;
#endif
inside_hook = true;
callback = st_callback;
transaction = t;
#ifdef __HIPCC__
// On GPU iostat does not make sense, so we always use a dummy variable.
int iostat = 0;
t->iostat = &iostat;
#endif
if ((t->format == ASYNCIO_DEFAULT_FORMAT) && (t->unit == ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_default_unit_default_format(t->iostat);
else
asyncio_hook_write_default_unit_default_format(t->iostat);
}
else if ((t->format != ASYNCIO_DEFAULT_FORMAT) && (t->unit == ASYNCIO_DEFAULT_UNIT))
{
char* format = get_format(t->func, t->format);
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_default_unit_formatted(strlen(format), format, t->iostat);
else
asyncio_hook_write_default_unit_formatted(strlen(format), format, t->iostat);
}
else if ((t->format == ASYNCIO_DEFAULT_FORMAT) && (t->unit != ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_default_format(t->unit, t->iostat);
else
asyncio_hook_write_unit_default_format(t->unit, t->iostat);
}
else if ((t->format == ASYNCIO_UNFORMATTED) && (t->unit != ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_unformatted(t->unit, t->iostat);
else
asyncio_hook_write_unit_unformatted(t->unit, t->iostat);
}
else
{
char* format = get_format(t->func, t->format);
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_formatted(t->unit, strlen(format), format, t->iostat);
else
asyncio_hook_write_unit_formatted(t->unit, strlen(format), format, t->iostat);
}
inside_hook = false;
offset = t->offset;
#ifdef __HIPCC__
if (iostat != 0)
{
fprintf(stderr, "ASYNCIO ERROR: iostat returned %d\n", iostat);
exit(1);
}
#endif
}
#ifdef __HIPCC__
// Reset device pointer to 0, length to 0.
static char* pdevice_pbuffer = NULL;
if (!pdevice_pbuffer)
CUDA_ERR_CHECK(hipGetSymbolAddress((void**)&pdevice_pbuffer, asyncio_pbuffer));
CUDA_ERR_CHECK(hipMemset(pdevice_pbuffer, 0, sizeof(char*)));
CUDA_ERR_CHECK(hipMemset(pdevice_length, 0, sizeof(size_t)));
#else
// Reset device pointer to 0, length to 0.
asyncio_pbuffer = NULL;
asyncio_buffer_length = 0;
#endif
}
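// Hedged illustration (hypothetical host driver, not part of the original
// file): device-side transactions are only serialized into the buffer, so
// the host must synchronize and then call asyncio_flush() to replay them
// through gfortran's I/O runtime. This reuses the asyncio_example_kernel
// sketch from above.
#ifdef __HIPCC__
extern "C" void asyncio_example_run_and_flush()
{
    asyncio_example_kernel<<<1, 1>>>();
    CUDA_ERR_CHECK(hipGetLastError());
    CUDA_ERR_CHECK(hipDeviceSynchronize());
    asyncio_flush();
}
#endif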
| 67957db5e72491f92410e78afa8211525eec93e7.cu | #include <cstdio>
#include <cstdlib>
#include <dlfcn.h>
#include <fcntl.h>
#include <gelf.h>
#include <map>
#include <setjmp.h>
#include <string>
#include <string.h>
#include <sstream>
#include <vector>
#include <unistd.h>
#define ASYNCIO_BUFFER_LENGTH (1024 * 1024 * 16)
#define ASYNCIO_DEFAULT_UNIT -1
#define ASYNCIO_DEFAULT_FORMAT -1
#define ASYNCIO_UNFORMATTED -2
using namespace std;
enum kind_t
{
TRANSACTION_TYPE_UNKNOWN,
TRANSACTION_TYPE_READ,
TRANSACTION_TYPE_WRITE
};
struct transaction_t
{
int unit;
int format;
void* func;
int nitems;
int offset;
char* buffer;
int* iostat;
kind_t kind;
};
enum type_t
{
READ_INT,
READ_INT_1D,
READ_INT_2D,
READ_LONG_LONG,
READ_FLOAT,
READ_FLOAT_1D,
READ_DOUBLE,
READ_DOUBLE_1D,
READ_DOUBLE_2D,
READ_DOUBLE_3D,
READ_DOUBLE_4D,
READ_BOOLEAN,
READ_BOOLEAN_1D,
READ_CHAR,
READ_CHAR_1D,
READ_CHAR_2D,
WRITE_INT,
WRITE_INT_1D,
WRITE_INT_2D,
WRITE_LONG_LONG,
WRITE_FLOAT,
WRITE_FLOAT_1D,
WRITE_DOUBLE,
WRITE_DOUBLE_1D,
WRITE_DOUBLE_2D,
WRITE_DOUBLE_3D,
WRITE_DOUBLE_4D,
WRITE_BOOLEAN,
WRITE_BOOLEAN_1D,
WRITE_CHAR,
WRITE_CHAR_1D,
WRITE_CHAR_2D
};
#ifdef __CUDACC__
#define DEVICE __device__
#define NAMESPACE gpu
#else
#define DEVICE
#define NAMESPACE cpu
#define trap() exit(1)
#endif
namespace NAMESPACE
{
DEVICE bool asyncio_error = false;
DEVICE char asyncio_buffer[ASYNCIO_BUFFER_LENGTH];
DEVICE size_t asyncio_buffer_length = 0;
DEVICE char* asyncio_pbuffer = NULL;
DEVICE transaction_t* t_curr = NULL;
DEVICE int t_curr_nitems = 0;
}
using namespace NAMESPACE;
// On GPU all I/O routines work with thread 0 only.
extern "C" DEVICE void asyncio_begin_default_unit_default_format_c(char kind, char unit, char format, int* iostat)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (unit != '*')
{
printf("ASYNCIO ERROR: Invalid unit specifier: %c\n", unit);
asyncio_error = true;
trap();
}
if (format != '*')
{
printf("ASYNCIO ERROR: Invalid format specifier: %c\n", format);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = ASYNCIO_DEFAULT_UNIT;
t.format = ASYNCIO_DEFAULT_FORMAT;
#ifdef __CUDACC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_default_format_c(char kind, int unit, char format, int* iostat)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (format != '*')
{
printf("ASYNCIO ERROR: Invalid format specifier: %c\n", format);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = ASYNCIO_DEFAULT_FORMAT;
#ifdef __CUDACC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_unformatted_c(char kind, int unit, int* iostat)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = ASYNCIO_UNFORMATTED;
#ifdef __CUDACC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_default_unit_formatted_c(char kind, char unit, void* func, int format, int* iostat)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (unit != '*')
{
printf("ASYNCIO ERROR: Invalid unit specifier: %c\n", unit);
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = ASYNCIO_DEFAULT_UNIT;
t.format = format;
t.func = func;
#ifdef __CUDACC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_begin_unit_formatted_c(char kind, int unit, void* func, int format, int* iostat)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (t_curr)
{
printf("ASYNCIO ERROR: Previous transaction has not been closed correctly\n");
asyncio_error = true;
trap();
}
if (!asyncio_pbuffer) asyncio_pbuffer = asyncio_buffer;
transaction_t t;
t.kind = TRANSACTION_TYPE_UNKNOWN;
if (kind == 'r')
t.kind = TRANSACTION_TYPE_READ;
if (kind == 'w')
t.kind = TRANSACTION_TYPE_WRITE;
t.unit = unit;
t.format = format;
t.func = func;
#ifdef __CUDACC__
*iostat = 0;
t.iostat = NULL;
#else
t.iostat = iostat;
#endif
t_curr_nitems = 0;
memcpy(asyncio_pbuffer, &t, sizeof(transaction_t));
t_curr = (transaction_t*)asyncio_pbuffer;
asyncio_pbuffer += sizeof(transaction_t);
}
extern "C" DEVICE void asyncio_read_integer_c(int* val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_c(int val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
*(int*)asyncio_pbuffer = val;
asyncio_pbuffer += sizeof(int);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_long_long_c(long long* val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_LONG_LONG;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(long long*));
asyncio_pbuffer += sizeof(long long*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_long_long_c(long long val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_LONG_LONG;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(long long));
asyncio_pbuffer += sizeof(long long);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_float_c(float* val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_FLOAT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(float*));
asyncio_pbuffer += sizeof(float*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_float_c(float val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_FLOAT;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(float));
asyncio_pbuffer += sizeof(float);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_c(double* val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_c(double val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(double));
asyncio_pbuffer += sizeof(double);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_logical_c(bool* val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_BOOLEAN;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(bool*));
asyncio_pbuffer += sizeof(bool*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_logical_c(bool val)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_BOOLEAN;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &val, sizeof(bool));
asyncio_pbuffer += sizeof(bool);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_char_c(char* val, int length)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_CHAR;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &length, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(char*));
asyncio_pbuffer += sizeof(char*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_char_c(char* val, int length)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_CHAR;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &length, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(char) * length);
asyncio_pbuffer += sizeof(char) * length;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_char_array1d_c(char** val, int dim_1, int* lengths)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_write_char_array1d_c(char** val, int dim_1, int* lengths)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_read_char_array2d_c(char** val, int dim_1, int dim_2, int* lengths)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_write_char_array2d_c(char** val, int dim_1, int dim_2, int* lengths)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
printf("ASYNCIO ERROR: not implemented\n");
asyncio_error = true;
trap();
}
extern "C" DEVICE void asyncio_read_logical_array1d_c(bool* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_BOOLEAN_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(bool*));
asyncio_pbuffer += sizeof(bool*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_logical_array1d_c(bool* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_BOOLEAN_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(bool) * dim_1);
asyncio_pbuffer += sizeof(bool) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_integer_array1d_c(int* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_array1d_c(int* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(int) * dim_1);
asyncio_pbuffer += sizeof(int) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_integer_array2d_c(int* val, int dim_1, int dim_2)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_INT_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(int*));
asyncio_pbuffer += sizeof(int*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_integer_array2d_c(int* val, int dim_1, int dim_2)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_INT_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(int) * dim_1 * dim_2);
asyncio_pbuffer += sizeof(int) * dim_1 * dim_2;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_float_array1d_c(float* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_FLOAT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(float*));
asyncio_pbuffer += sizeof(float*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_float_array1d_c(float* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_FLOAT_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(float) * dim_1);
asyncio_pbuffer += sizeof(float) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array1d_c(double* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array1d_c(double* val, int dim_1)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_1D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1);
asyncio_pbuffer += sizeof(double) * dim_1;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array2d_c(double* val, int dim_1, int dim_2)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array2d_c(double* val, int dim_1, int dim_2)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_2D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array3d_c(double* val, int dim_1, int dim_2, int dim_3)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = READ_DOUBLE_3D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array3d_c(double* val, int dim_1, int dim_2, int dim_3)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
type_t type = WRITE_DOUBLE_3D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2 * dim_3);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2 * dim_3;
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_read_double_array4d_c(double* val, int dim_1, int dim_2, int dim_3, int dim_4)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_READ)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
	type_t type = READ_DOUBLE_4D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_4, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &val, sizeof(double*));
asyncio_pbuffer += sizeof(double*);
t_curr_nitems++;
}
extern "C" DEVICE void asyncio_write_double_array4d_c(double* val, int dim_1, int dim_2, int dim_3, int dim_4)
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to write without an active transaction\n");
asyncio_error = true;
trap();
}
if (t_curr->kind != TRANSACTION_TYPE_WRITE)
{
printf("ASYNCIO ERROR: Cannot read in write and write in read transaction\n");
asyncio_error = true;
trap();
}
	type_t type = WRITE_DOUBLE_4D;
memcpy(asyncio_pbuffer, &type, sizeof(type_t));
asyncio_pbuffer += sizeof(type_t);
memcpy(asyncio_pbuffer, &dim_1, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_2, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_3, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, &dim_4, sizeof(int));
asyncio_pbuffer += sizeof(int);
memcpy(asyncio_pbuffer, val, sizeof(double) * dim_1 * dim_2 * dim_3 * dim_4);
asyncio_pbuffer += sizeof(double) * dim_1 * dim_2 * dim_3 * dim_4;
t_curr_nitems++;
}
extern "C" void asyncio_flush();
extern "C" DEVICE void asyncio_end()
{
#ifdef __CUDACC__
if (threadIdx.x) return;
#endif
if (!t_curr)
{
printf("ASYNCIO ERROR: Attempted to end without an active transaction\n");
asyncio_error = true;
trap();
}
memcpy(&t_curr->nitems, &t_curr_nitems, sizeof(int));
t_curr = NULL;
// Save the current buffer length.
asyncio_buffer_length = (size_t)asyncio_pbuffer - (size_t)asyncio_buffer;
#ifndef __CUDACC__
// On host we can flush each individual write statement.
asyncio_flush();
#endif
}
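// Layout note (descriptive only, derived from the packing code above): each
// transaction occupies one contiguous region of asyncio_buffer,
//
//   [transaction_t header][type_t tag][payload][type_t tag][payload] ...
//
// where a WRITE item stores the value itself (preceded by any array
// dimensions as ints), while a READ item stores the destination pointer
// instead. asyncio_flush() walks exactly this layout on the host side.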
#define CUDA_ERR_CHECK(x) \
do { cudaError_t err = x; \
if (err != cudaSuccess) { \
printf("CUDA error %d \"%s\" at %s:%d\n", \
(int)err, cudaGetErrorString(err), \
__FILE__, __LINE__); exit(1); \
}} while (0);
struct st_parameter_dt;
extern "C" void asyncio_hook_read_default_unit_default_format(int*);
extern "C" void asyncio_hook_read_default_unit_formatted(size_t, char*, int*);
extern "C" void asyncio_hook_read_unit_unformatted(int, int*);
extern "C" void asyncio_hook_read_unit_default_format(int, int*);
extern "C" void asyncio_hook_read_unit_formatted(int, size_t, char*, int*);
extern "C" void asyncio_hook_write_default_unit_default_format(int*);
extern "C" void asyncio_hook_write_default_unit_formatted(size_t, char*, int*);
extern "C" void asyncio_hook_write_unit_unformatted(int, int*);
extern "C" void asyncio_hook_write_unit_default_format(int, int*);
extern "C" void asyncio_hook_write_unit_formatted(int, size_t, char*, int*);
extern "C" void asyncio_hook_read_integer_array_1d(void*, int);
extern "C" void asyncio_hook_read_integer_array_2d(void*, int, int);
extern "C" void asyncio_hook_read_float_array_1d(void*, int);
extern "C" void asyncio_hook_read_double_array_1d(void*, int);
extern "C" void asyncio_hook_read_double_array_2d(void*, int, int);
extern "C" void asyncio_hook_read_double_array_3d(void*, int, int, int);
extern "C" void asyncio_hook_read_double_array_4d(void*, int, int, int, int);
extern "C" void asyncio_hook_read_boolean_array_1d(void*, int);
extern "C" void asyncio_hook_write_integer_array_1d(void*, int);
extern "C" void asyncio_hook_write_integer_array_2d(void*, int, int);
extern "C" void asyncio_hook_write_float_array_1d(void*, int);
extern "C" void asyncio_hook_write_double_array_1d(void*, int);
extern "C" void asyncio_hook_write_double_array_2d(void*, int, int);
extern "C" void asyncio_hook_write_double_array_3d(void*, int, int, int);
extern "C" void asyncio_hook_write_double_array_4d(void*, int, int, int, int);
extern "C" void asyncio_hook_write_boolean_array_1d(void*, int);
static bool inside_hook = false;
static bool inside_hook_array = false;
static jmp_buf get_st_parameter_jmp;
typedef void (*st_callback_t)(transaction_t*, st_parameter_dt*);
static st_callback_t callback;
static transaction_t* transaction;
static st_parameter_dt* st_parameter = NULL;
#ifdef DYNAMIC
#define LIBGFORTRAN "libgfortran.so.3"
static void* libgfortran = NULL;
#define bind_lib(lib) \
if (!libgfortran) \
{ \
libgfortran = dlopen(lib, RTLD_NOW | RTLD_GLOBAL); \
if (!libgfortran) \
{ \
fprintf(stderr, "Error loading %s: %s\n", lib, dlerror()); \
abort(); \
} \
}
#define bind_sym(handle, sym, retty, ...) \
typedef retty (*sym##_func_t)(__VA_ARGS__); \
static sym##_func_t sym##_real = NULL; \
if (!sym##_real) \
{ \
sym##_real = (sym##_func_t)dlsym(handle, #sym); \
if (!sym##_real) \
{ \
fprintf(stderr, "Error loading %s: %s\n", #sym, dlerror()); \
abort(); \
} \
}
extern "C" void _gfortran_st_read(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_read(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_read(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_write
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_read, void, st_parameter_dt*);
_gfortran_st_read_real(stp);
#else
__real__gfortran_st_read(stp);
#endif
if (inside_hook)
{
st_parameter = stp;
callback(transaction, stp);
longjmp(get_st_parameter_jmp, 1);
}
}
extern "C" void _gfortran_st_read_done(st_parameter_dt * stp);
#ifdef DYNAMIC
extern "C" void _gfortran_st_read_done(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_read_done(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_read_done(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_read_done
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_read_done, void, st_parameter_dt*);
_gfortran_st_read_done_real(stp);
#else
__real__gfortran_st_read_done(stp);
#endif
}
#ifdef DYNAMIC
extern "C" void _gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#else
extern "C" void __real__gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen);
extern "C" void __wrap__gfortran_transfer_array(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#endif
{
// In case of array writing hook, we use our own st_parameter
if (inside_hook_array)
stp = st_parameter;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_transfer_array, void, st_parameter_dt*, void*, int, size_t);
_gfortran_transfer_array_real(stp, desc, kind, charlen);
#else
__real__gfortran_transfer_array(stp, desc, kind, charlen);
#endif
}
extern "C" void _gfortran_transfer_integer_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character_write(st_parameter_dt *, void *, int);
#ifdef DYNAMIC
extern "C" void _gfortran_st_write(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_write(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_write(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_write
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_write, void, st_parameter_dt*);
_gfortran_st_write_real(stp);
#else
__real__gfortran_st_write(stp);
#endif
if (inside_hook)
{
st_parameter = stp;
callback(transaction, stp);
longjmp(get_st_parameter_jmp, 1);
}
}
extern "C" void _gfortran_st_write_done(st_parameter_dt * stp);
#ifdef DYNAMIC
extern "C" void _gfortran_st_write_done(st_parameter_dt * stp)
#else
extern "C" void __real__gfortran_st_write_done(st_parameter_dt * stp);
extern "C" void __wrap__gfortran_st_write_done(st_parameter_dt * stp)
#endif
{
// In case of array writing hook, we discard _gfortran_st_write_done
// completely.
if (inside_hook_array)
return;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_st_write_done, void, st_parameter_dt*);
_gfortran_st_write_done_real(stp);
#else
__real__gfortran_st_write_done(stp);
#endif
}
#ifdef DYNAMIC
extern "C" void _gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#else
extern "C" void __real__gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen);
extern "C" void __wrap__gfortran_transfer_array_write(st_parameter_dt* stp, void* desc, int kind, size_t charlen)
#endif
{
// In case of array writing hook, we use our own st_parameter
if (inside_hook_array)
stp = st_parameter;
#ifdef DYNAMIC
bind_lib(LIBGFORTRAN);
bind_sym(libgfortran, _gfortran_transfer_array_write, void, st_parameter_dt*, void*, int, size_t);
_gfortran_transfer_array_write_real(stp, desc, kind, charlen);
#else
__real__gfortran_transfer_array_write(stp, desc, kind, charlen);
#endif
}
extern "C" void _gfortran_transfer_integer(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_integer_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_real_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_logical_write(st_parameter_dt *, void *, int);
extern "C" void _gfortran_transfer_character_write(st_parameter_dt *, void *, int);
static map<void*, string>* pfuncs = NULL, funcs;
static map<string, void*> formats;
static bool funcs_resolved = false;
static void st_callback(transaction_t* t, st_parameter_dt* st_parameter)
{
for (int i = 0, e = t->nitems; i != e; i++)
{
type_t type = *(type_t*)(t->buffer + t->offset);
t->offset += sizeof(type_t);
void* value = (void*)(t->buffer + t->offset);
switch (type)
{
case READ_INT :
_gfortran_transfer_integer(st_parameter, *(void**)value, sizeof(int));
t->offset += sizeof(int*);
break;
case WRITE_INT :
_gfortran_transfer_integer_write(st_parameter, value, sizeof(int));
t->offset += sizeof(int);
break;
case READ_INT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_integer_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(int*);
}
break;
case WRITE_INT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(int) * length[0];
}
break;
case READ_INT_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_integer_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(int*);
}
break;
case WRITE_INT_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_integer_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(int) * length[0] * length[1];
}
break;
case READ_LONG_LONG :
_gfortran_transfer_integer(st_parameter, *(void**)value, sizeof(long long));
t->offset += sizeof(long long*);
break;
case WRITE_LONG_LONG :
_gfortran_transfer_integer_write(st_parameter, value, sizeof(long long));
t->offset += sizeof(long long);
break;
case READ_FLOAT :
_gfortran_transfer_real(st_parameter, *(void**)value, sizeof(float));
t->offset += sizeof(float*);
break;
case WRITE_FLOAT :
_gfortran_transfer_real_write(st_parameter, value, sizeof(float));
t->offset += sizeof(float);
break;
case READ_FLOAT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_float_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(float*);
}
break;
case WRITE_FLOAT_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_float_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(float) * length[0];
}
break;
case READ_DOUBLE :
_gfortran_transfer_real(st_parameter, *(void**)value, sizeof(double));
t->offset += sizeof(double*);
break;
case WRITE_DOUBLE :
_gfortran_transfer_real_write(st_parameter, value, sizeof(double));
t->offset += sizeof(double);
break;
case READ_DOUBLE_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_double_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0];
}
break;
case READ_DOUBLE_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_double_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_2D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 2;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_2d(value, length[0], length[1]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1];
}
break;
case READ_DOUBLE_3D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 3;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_double_array_3d(value, length[0], length[1], length[2]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_3D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 3;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_3d(value, length[0], length[1], length[2]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1] * length[2];
}
break;
case READ_DOUBLE_4D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 4;
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_double_array_4d(value, length[0], length[1], length[2], length[3]);
inside_hook_array = false;
t->offset += sizeof(double*);
}
break;
case WRITE_DOUBLE_4D :
{
int* length = (int*)value;
t->offset += sizeof(int) * 4;
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_double_array_4d(value, length[0], length[1], length[2], length[3]);
inside_hook_array = false;
t->offset += sizeof(double) * length[0] * length[1] * length[2] * length[3];
}
break;
case READ_BOOLEAN :
_gfortran_transfer_logical(st_parameter, *(void**)value, sizeof(bool));
t->offset += sizeof(bool*);
break;
case WRITE_BOOLEAN :
_gfortran_transfer_logical_write(st_parameter, value, sizeof(bool));
t->offset += sizeof(bool);
break;
case READ_BOOLEAN_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
inside_hook_array = true;
				asyncio_hook_read_boolean_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(bool*);
}
break;
case WRITE_BOOLEAN_1D :
{
int* length = (int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
inside_hook_array = true;
asyncio_hook_write_boolean_array_1d(value, length[0]);
inside_hook_array = false;
t->offset += sizeof(bool) * length[0];
}
break;
case READ_CHAR :
{
int length = *(int*)value;
t->offset += sizeof(int);
value = *(void**)(t->buffer + t->offset);
_gfortran_transfer_character(st_parameter, value, sizeof(char) * length);
t->offset += sizeof(char*);
}
break;
case WRITE_CHAR :
{
int length = *(int*)value;
t->offset += sizeof(int);
value = (void*)(t->buffer + t->offset);
_gfortran_transfer_character_write(st_parameter, value, sizeof(char) * length);
t->offset += sizeof(char) * length;
}
break;
case READ_CHAR_1D :
case WRITE_CHAR_1D :
case READ_CHAR_2D :
case WRITE_CHAR_2D :
fprintf(stderr, "ASYNCIO ERROR: not implemented\n");
exit(1);
break;
default :
fprintf(stderr, "ASYNCIO ERROR: Unknown data type %d\n", type);
exit(1);
}
}
if (t->kind == TRANSACTION_TYPE_READ)
_gfortran_st_read_done(st_parameter);
else
_gfortran_st_write_done(st_parameter);
}
#ifdef __CUDACC__
static char* get_format(void* func, int format)
{
if (!funcs_resolved)
{
// 1) Resolve device functions addresses.
for (map<void*, string>::iterator i = pfuncs->begin(), e = pfuncs->end(); i != e; i++)
{
void* gpuAddress = NULL;
CUDA_ERR_CHECK(cudaGetSymbolAddress(&gpuAddress, (const void*)i->first));
funcs[gpuAddress] = i->second;
}
delete pfuncs;
pfuncs = &funcs;
// 2) Find addresses of all __GPUFMT_* variables in host executable.
int fd = -1;
Elf *e = NULL;
try
{
if (elf_version(EV_CURRENT) == EV_NONE)
{
fprintf(stderr, "Cannot initialize ELF library: %s\n",
elf_errmsg(-1));
throw;
}
if ((fd = open("/proc/self/exe", O_RDONLY)) < 0)
{
fprintf(stderr, "Cannot open self executable\n");
throw;
}
if ((e = elf_begin(fd, ELF_C_READ, e)) == 0) {
fprintf(stderr, "Cannot read ELF image: %s\n", elf_errmsg(-1));
throw;
}
size_t shstrndx;
if (elf_getshdrstrndx(e, &shstrndx)) {
fprintf(stderr, "elf_getshdrstrndx() failed: %s\n", elf_errmsg(-1));
throw;
}
// Locate the symbol table.
Elf_Scn* scn = elf_nextscn(e, NULL);
for (int i = 1; scn != NULL; scn = elf_nextscn(e, scn), i++)
{
// Get section header.
GElf_Shdr shdr;
if (!gelf_getshdr(scn, &shdr))
{
fprintf(stderr, "gelf_getshdr() failed: %s\n", elf_errmsg(-1));
throw;
}
// If section is not a symbol table:
if (shdr.sh_type != SHT_SYMTAB) continue;
// Load symbols.
Elf_Data* data = elf_getdata(scn, NULL);
if (!data)
{
fprintf(stderr, "Expected data section for SYMTAB\n");
throw;
}
if (shdr.sh_size && !shdr.sh_entsize)
{
fprintf(stderr, "Cannot get the number of symbols\n");
throw;
}
int nsymbols = 0;
if (shdr.sh_size)
nsymbols = shdr.sh_size / shdr.sh_entsize;
int strndx = shdr.sh_link;
for (int i = 0; i < nsymbols; i++)
{
GElf_Sym sym;
if (!gelf_getsym(data, i, &sym))
{
fprintf(stderr, "gelf_getsym() failed: %s\n", elf_errmsg(-1));
throw;
}
char* name = elf_strptr(e, strndx, sym.st_name);
if (!name)
{
fprintf(stderr, "Cannot get the name of %d-th symbol: %s\n", i, elf_errmsg(-1));
throw;
}
if (!strncmp(name, "__GPUFMT_", strlen("__GPUFMT_")))
{
name += strlen("__GPUFMT_");
char* format = (char*)(size_t)sym.st_value;
// Perform basic format checks:
// 1) last character is null
// 2) first and last non-null characters are brackets
if (format[sym.st_size - 1] != '\0')
{
bool null_found = false;
for (int i = sym.st_size - 1; i >= 0; i--)
if (format[i] == '\0')
{
null_found = true;
break;
}
if (!null_found)
{
fprintf(stderr, "Error: format string \"%s\" is not terminated by c_null_char\n",
name);
exit(1);
}
else
{
fprintf(stderr, "Warning: format string \"%s\" has extra symbols after c_null_char",
name);
fprintf(stderr, " length should be %zu instead of %zu\n",
strlen(format) + 1, (size_t)sym.st_size);
}
}
if (format[0] != '(')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must start with '(' symbol\n",
name, format);
exit(1);
}
if (format[strlen(format) - 1] != ')')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must finish with ')' symbol\n",
name, format);
exit(1);
}
// This symbol is a valid format string - record it.
formats[name] = (void*)format;
}
}
elf_end(e);
close(fd);
e = NULL;
funcs_resolved = true;
break;
}
if (!funcs_resolved)
{
fprintf(stderr, "Cannot locate the symbol table of executable\n");
throw;
}
}
catch (...)
{
if (e)
elf_end(e);
if (fd >= 0)
close(fd);
exit(1);
}
}
map<void*, string>::iterator i = funcs.find((void*)func);
if (i == funcs.end())
{
fprintf(stderr, "ASYNCIO ERROR: Unknown function @ %p\n", (void*)func);
exit(1);
}
stringstream svarname;
svarname << i->second << "_" << format;
string varname = svarname.str();
map<string, void*>::iterator j = formats.find(varname);
if (j == formats.end())
{
fprintf(stderr, "ASYNCIO ERROR: Undefined format spec \"%s\"\n", varname.c_str());
exit(1);
}
char* result = (char*)j->second;
return result;
}
extern "C" void CUDARTAPI __real___cudaRegisterVar(
void **fatCubinHandle,
char *hostVar,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global
);
extern "C" void CUDARTAPI __wrap___cudaRegisterVar(
void **fatCubinHandle,
char *hostVar,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global
)
{
// Workaround in case if static funcs map could happen to be
// initialized later.
if (!pfuncs)
pfuncs = new map<void*, string>();
__real___cudaRegisterVar(
fatCubinHandle,
hostVar,
deviceAddress,
deviceName,
ext,
size,
constant,
global);
if (strncmp(deviceName, "__FUNC_", strlen("__FUNC_")))
return;
// This symbol is a function name anchor - record it.
string& name = pfuncs->operator[]((void*)hostVar);
name = deviceAddress + strlen("__FUNC_");
}
#else
static char* get_format(void* func, int format)
{
if (!funcs_resolved)
{
// 1) Find addresses of all __GPUFMT_* variables in host executable.
int fd = -1;
Elf *e = NULL;
try
{
if (elf_version(EV_CURRENT) == EV_NONE)
{
fprintf(stderr, "Cannot initialize ELF library: %s\n",
elf_errmsg(-1));
throw;
}
if ((fd = open("/proc/self/exe", O_RDONLY)) < 0)
{
fprintf(stderr, "Cannot open self executable\n");
throw;
}
if ((e = elf_begin(fd, ELF_C_READ, e)) == 0) {
fprintf(stderr, "Cannot read ELF image: %s\n", elf_errmsg(-1));
throw;
}
size_t shstrndx;
if (elf_getshdrstrndx(e, &shstrndx)) {
fprintf(stderr, "elf_getshdrstrndx() failed: %s\n", elf_errmsg(-1));
throw;
}
// Locate the symbol table.
Elf_Scn* scn = elf_nextscn(e, NULL);
for (int i = 1; scn != NULL; scn = elf_nextscn(e, scn), i++)
{
// Get section header.
GElf_Shdr shdr;
if (!gelf_getshdr(scn, &shdr))
{
fprintf(stderr, "gelf_getshdr() failed: %s\n", elf_errmsg(-1));
throw;
}
// If section is not a symbol table:
if (shdr.sh_type != SHT_SYMTAB) continue;
// Load symbols.
Elf_Data* data = elf_getdata(scn, NULL);
if (!data)
{
fprintf(stderr, "Expected data section for SYMTAB\n");
throw;
}
if (shdr.sh_size && !shdr.sh_entsize)
{
fprintf(stderr, "Cannot get the number of symbols\n");
throw;
}
int nsymbols = 0;
if (shdr.sh_size)
nsymbols = shdr.sh_size / shdr.sh_entsize;
int strndx = shdr.sh_link;
for (int i = 0; i < nsymbols; i++)
{
GElf_Sym sym;
if (!gelf_getsym(data, i, &sym))
{
fprintf(stderr, "gelf_getsym() failed: %s\n", elf_errmsg(-1));
throw;
}
char* name = elf_strptr(e, strndx, sym.st_name);
if (!name)
{
fprintf(stderr, "Cannot get the name of %d-th symbol: %s\n", i, elf_errmsg(-1));
throw;
}
if (!strncmp(name, "__FUNC_", strlen("__FUNC_")))
{
// This symbol is a function name anchor - record it.
name += strlen("__FUNC_");
funcs[(void*)(size_t)sym.st_value] = name;
}
if (!strncmp(name, "__GPUFMT_", strlen("__GPUFMT_")))
{
name += strlen("__GPUFMT_");
char* format = (char*)(size_t)sym.st_value;
// Perform basic format checks:
// 1) last character is null
// 2) first and last non-null characters are brackets
if (format[sym.st_size - 1] != '\0')
{
bool null_found = false;
for (int i = sym.st_size - 1; i >= 0; i--)
if (format[i] == '\0')
{
null_found = true;
break;
}
if (!null_found)
{
fprintf(stderr, "Error: format string \"%s\" is not terminated by c_null_char\n",
name);
exit(1);
}
else
{
fprintf(stderr, "Warning: format string \"%s\" has extra symbols after c_null_char",
name);
fprintf(stderr, " length should be %zu instead of %zu\n",
strlen(format) + 1, (size_t)sym.st_size);
}
}
if (format[0] != '(')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must start with '(' symbol\n",
name, format);
exit(1);
}
if (format[strlen(format) - 1] != ')')
{
fprintf(stderr, "Error: format string \"%s\" = \"%s\" must finish with ')' symbol\n",
name, format);
exit(1);
}
#if 0
// Additional format checks to ensure no default
// forms of "i" and "d" formats are used (by some
// reason, gfortran does not support them).
for (int i = 0, e = strlen(format) - 2; i != e; i++)
{
if (((format[i] == ',') || (format[i] == '(')) &&
((format[i + 1] == 'i') || (format[i + 1] == 'd')))
{
if ((format[i + 2] >= '0') && (format[i + 2] <= '9'))
continue;
fprintf(stderr, "Error: malformed integer format in \"%s\" = \"%s\"\n",
name, format);
exit(1);
}
}
#endif
// This symbol is a valid format string - record it.
formats[name] = (void*)format;
}
}
elf_end(e);
close(fd);
e = NULL;
funcs_resolved = true;
break;
}
if (!funcs_resolved)
{
fprintf(stderr, "Cannot locate the symbol table of executable\n");
throw;
}
}
catch (...)
{
if (e)
elf_end(e);
if (fd >= 0)
close(fd);
exit(1);
}
}
map<void*, string>::iterator i = funcs.find((void*)func);
if (i == funcs.end())
{
fprintf(stderr, "ASYNCIO ERROR: Unknown function @ %p\n", (void*)func);
exit(1);
}
stringstream svarname;
svarname << i->second << "_" << format;
string varname = svarname.str();
map<string, void*>::iterator j = formats.find(varname);
if (j == formats.end())
{
fprintf(stderr, "ASYNCIO ERROR: Undefined format spec \"%s\"\n", varname.c_str());
exit(1);
}
char* result = (char*)j->second;
return result;
}
#endif // __CUDACC__
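// Symbol naming convention assumed by get_format() above (the names below are
// hypothetical, for illustration only): for a Fortran routine "mysub" that
// contains a formatted I/O statement with format id 3, a preprocessing pass is
// expected to emit
//
//   __FUNC_mysub     - an anchor symbol whose address identifies "mysub"
//   __GPUFMT_mysub_3 - a C string holding the format text, e.g. "(i4,f8.3)"
//
// get_format() then resolves func -> "mysub", appends "_3", and returns the
// contents of __GPUFMT_mysub_3 after the '(' ... ')' and c_null_char checks.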
extern "C" void asyncio_flush()
{
#ifdef __CUDACC__
// Transfer asyncio error status.
static bool* pdevice_error = NULL;
if (!pdevice_error)
CUDA_ERR_CHECK(cudaGetSymbolAddress((void**)&pdevice_error, asyncio_error));
bool host_error = true;
CUDA_ERR_CHECK(cudaMemcpy(&host_error, pdevice_error, sizeof(bool),
cudaMemcpyDeviceToHost));
// Do nothing, if error status is true.
if (host_error) return;
// Transfer asyncio buffer length.
static size_t* pdevice_length = NULL;
if (!pdevice_length)
CUDA_ERR_CHECK(cudaGetSymbolAddress((void**)&pdevice_length, asyncio_buffer_length));
size_t host_length = 0;
CUDA_ERR_CHECK(cudaMemcpy(&host_length, pdevice_length, sizeof(size_t),
cudaMemcpyDeviceToHost));
// Do nothing, if buffer length is zero.
if (host_length == 0)
{
CUDA_ERR_CHECK(cudaMemset(pdevice_error, 0, sizeof(bool)));
return;
}
// Transfer asyncio buffer contents.
static char* pdevice_buffer = NULL;
if (!pdevice_buffer)
CUDA_ERR_CHECK(cudaGetSymbolAddress((void**)&pdevice_buffer, asyncio_buffer));
vector<char> vhost_buffer;
vhost_buffer.resize(host_length);
char* host_buffer = &vhost_buffer[0];
CUDA_ERR_CHECK(cudaMemcpy(host_buffer, pdevice_buffer, host_length,
cudaMemcpyDeviceToHost));
for (int offset = 0; offset < host_length; )
#else
// Do nothing, if error status is true.
if (asyncio_error)
{
asyncio_error = false;
return;
}
for (int offset = 0; offset < asyncio_buffer_length; )
#endif
{
#ifdef __CUDACC__
transaction_t* t = (transaction_t*)(host_buffer + offset);
#else
transaction_t* t = (transaction_t*)(asyncio_buffer + offset);
#endif
offset += sizeof(transaction_t);
t->offset = offset;
#ifdef __CUDACC__
t->buffer = host_buffer;
#else
t->buffer = asyncio_buffer;
#endif
inside_hook = true;
callback = st_callback;
transaction = t;
#ifdef __CUDACC__
// On GPU iostat does not make sense, so we always use a dummy variable.
int iostat = 0;
t->iostat = &iostat;
#endif
if ((t->format == ASYNCIO_DEFAULT_FORMAT) && (t->unit == ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_default_unit_default_format(t->iostat);
else
asyncio_hook_write_default_unit_default_format(t->iostat);
}
else if ((t->format != ASYNCIO_DEFAULT_FORMAT) && (t->unit == ASYNCIO_DEFAULT_UNIT))
{
char* format = get_format(t->func, t->format);
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_default_unit_formatted(strlen(format), format, t->iostat);
else
asyncio_hook_write_default_unit_formatted(strlen(format), format, t->iostat);
}
else if ((t->format == ASYNCIO_DEFAULT_FORMAT) && (t->unit != ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_default_format(t->unit, t->iostat);
else
asyncio_hook_write_unit_default_format(t->unit, t->iostat);
}
else if ((t->format == ASYNCIO_UNFORMATTED) && (t->unit != ASYNCIO_DEFAULT_UNIT))
{
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_unformatted(t->unit, t->iostat);
else
asyncio_hook_write_unit_unformatted(t->unit, t->iostat);
}
else
{
char* format = get_format(t->func, t->format);
int get_st_parameter_val = setjmp(get_st_parameter_jmp);
if (!get_st_parameter_val)
if (t->kind == TRANSACTION_TYPE_READ)
asyncio_hook_read_unit_formatted(t->unit, strlen(format), format, t->iostat);
else
asyncio_hook_write_unit_formatted(t->unit, strlen(format), format, t->iostat);
}
inside_hook = false;
offset = t->offset;
#ifdef __CUDACC__
if (iostat != 0)
{
fprintf(stderr, "ASYNCIO ERROR: iostat returned %d\n", iostat);
exit(1);
}
#endif
}
#ifdef __CUDACC__
// Reset device pointer to 0, length to 0.
static char* pdevice_pbuffer = NULL;
if (!pdevice_pbuffer)
CUDA_ERR_CHECK(cudaGetSymbolAddress((void**)&pdevice_pbuffer, asyncio_pbuffer));
CUDA_ERR_CHECK(cudaMemset(pdevice_pbuffer, 0, sizeof(char*)));
CUDA_ERR_CHECK(cudaMemset(pdevice_length, 0, sizeof(size_t)));
#else
// Reset device pointer to 0, length to 0.
asyncio_pbuffer = NULL;
asyncio_buffer_length = 0;
#endif
}
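#if 0
// Hypothetical usage sketch (illustration only, not part of the library): a
// device kernel records a list-directed WRITE of one integer, and the host
// replays it through gfortran by calling asyncio_flush(). The kernel name and
// launch configuration below are assumptions.
__global__ void asyncio_example_kernel()
{
	int iostat = 0;
	asyncio_begin_default_unit_default_format_c('w', '*', '*', &iostat);
	asyncio_write_integer_c(42);
	asyncio_end();
}
void asyncio_example_host()
{
	asyncio_example_kernel<<<1, 1>>>();
	CUDA_ERR_CHECK(cudaDeviceSynchronize());
	asyncio_flush(); // replay the buffered WRITE(*,*) on the host
}
#endif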
|
639fac0ef007cc7fda7e889ad444cc79a7fb65d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include <iostream>
#include "kernel.h"
#define TPB 64
__global__
void ddKernel(float* d_out, const float* d_in, int size, float h) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
	// The stencil reads d_in[i - 1] and d_in[i + 1], so skip both ends of the
	// array to avoid out-of-bounds accesses at i == 0 and i == size - 1.
	if (i < 1 || i >= size - 1) return;
	d_out[i] = (d_in[i - 1] - 2.f * d_in[i] + d_in[i + 1]) / (h * h);
}
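// The stencil above is the standard second-order central difference:
// f''(x) ~= (f(x - h) - 2 f(x) + f(x + h)) / h^2.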
void ddParallel(float* out, const float* in, int n, float h) {
float* d_in = 0, *d_out = 0;
float tiempo_computo;
hipEvent_t inicio, alto;
hipEventCreate(&inicio); hipEventCreate(&alto); //Creamos los eventos
hipEventRecord(inicio); //Creamos una marca temporal, una especia de bandera
hipMalloc(&d_in, n * sizeof(float));
hipMalloc(&d_out, n * sizeof(float));
hipMemcpy(d_in, in, n * sizeof(float), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(ddKernel, dim3((n + TPB - 1) / TPB), dim3(TPB), 0, 0, d_out, d_in, n, h);
hipMemcpy(out, d_out, n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
	hipEventRecord(alto); // Record a stop timestamp
	hipEventSynchronize(alto); // Block the CPU until the recorded events have completed
	hipEventElapsedTime(&tiempo_computo, inicio, alto); // Compute the elapsed time between the events
	hipEventDestroy(inicio); hipEventDestroy(alto); // Release the events so timing can be repeated later
std::cout << tiempo_computo << "\n";
}
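#if 0
// Hypothetical usage sketch (illustration only, not part of kernel.h): apply
// ddParallel to f(x) = x^2 sampled on [0, 1]; away from the boundaries the
// computed second derivative should be close to 2.
#include <vector>
int main()
{
	const int n = 128;
	const float h = 1.0f / (n - 1);
	std::vector<float> in(n), out(n, 0.0f);
	for (int i = 0; i < n; ++i) { const float x = i * h; in[i] = x * x; }
	ddParallel(out.data(), in.data(), n, h);
	std::cout << "out[n/2] = " << out[n / 2] << " (expected ~2)\n";
	return 0;
}
#endif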
| 639fac0ef007cc7fda7e889ad444cc79a7fb65d7.cu | #pragma once
#include <iostream>
#include "kernel.h"
#define TPB 64
__global__
void ddKernel(float* d_out, const float* d_in, int size, float h) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
	// The stencil reads d_in[i - 1] and d_in[i + 1], so skip both ends of the
	// array to avoid out-of-bounds accesses at i == 0 and i == size - 1.
	if (i < 1 || i >= size - 1) return;
	d_out[i] = (d_in[i - 1] - 2.f * d_in[i] + d_in[i + 1]) / (h * h);
}
void ddParallel(float* out, const float* in, int n, float h) {
float* d_in = 0, *d_out = 0;
float tiempo_computo;
cudaEvent_t inicio, alto;
cudaEventCreate(&inicio); cudaEventCreate(&alto); //Creamos los eventos
cudaEventRecord(inicio); //Creamos una marca temporal, una especia de bandera
cudaMalloc(&d_in, n * sizeof(float));
cudaMalloc(&d_out, n * sizeof(float));
cudaMemcpy(d_in, in, n * sizeof(float), cudaMemcpyHostToDevice);
ddKernel <<<(n + TPB - 1) / TPB, TPB >>> (d_out, d_in, n, h);
cudaMemcpy(out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
	cudaEventRecord(alto); // Record another timestamp, the stop flag
	cudaEventSynchronize(alto); // Block the CPU until the recorded events have completed
	cudaEventElapsedTime(&tiempo_computo, inicio, alto); // Compute the elapsed time between the two events
	cudaEventDestroy(inicio); cudaEventDestroy(alto); // Release the events so timing can be done again later
std::cout << tiempo_computo << "\n";
}
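/* Hypothetical usage sketch (illustration only, not part of the original file):
     const int N = 1 << 20;
     const float h = 0.01f;
     float* in = new float[N];
     float* out = new float[N];
     for (int i = 0; i < N; ++i) in[i] = sinf(i * h);
     ddParallel(out, in, N, h);   // prints the measured time in milliseconds
     delete[] in; delete[] out;
   Note that the boundary entries out[0] and out[N-1] are left unwritten by the kernel. */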
|
1b1ca885e7821ee41befb7e663871a1386cdd65f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
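/*
    Sketch of the weight actually computed below: for centre pixel p and a neighbour q
    at offset (i, j) within the radius,
        w(p, q) = cGaussian[i + r] * cGaussian[j + r] * exp(-|I(p) - I(q)|^2 / (2 * e_d^2))
        output(p) = sum_q w(p, q) * I(q) / sum_q w(p, q)
*/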
//Euclidean Distance (x, y, d) = exp(-(|x - y| / d)^2 / 2)
__device__ float euclideanLen(float4 a, float4 b, float d)
{
float mod = (b.x - a.x) * (b.x - a.x) +
(b.y - a.y) * (b.y - a.y) +
(b.z - a.z) * (b.z - a.z);
return __expf(-mod / (2.f * d * d));
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0]
rgba.y = __saturatef(fabs(rgba.y));
rgba.z = __saturatef(fabs(rgba.z));
rgba.w = __saturatef(fabs(rgba.w));
return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od, int w, int h,
float e_d, int r)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= w || y >= h)
{
return;
}
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
float4 center = tex2D(rgbaTex, x, y);
for (int i = -r; i <= r; i++)
{
for (int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt(t/sum);
}
class d_bilateral_filter_functor
{
float e_d;
int r;
public:
d_bilateral_filter_functor(float e_d, int r)
{
this->e_d = e_d;
this->r = r;
}
__device__ int operator() ( const thrust::window_2d<uchar4> &input, const thrust::window_2d<uint> &output ) const
{
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
uchar4 center_int = input[r][r];
float4 center = {(float)center_int.x,(float)center_int.y,(float)center_int.z,(float)center_int.w};
for (int i = 0; i <= 2*r; i++)
{
for (int j = 0; j <= 2*r; j++)
{
uchar4 temp = input[j][i];
float4 curPix = {(float)temp.x,(float)temp.y,(float)temp.z,(float)temp.w,};
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
    output[r][r]=rgbaFloatToInt(t/sum);
return 1;
}
};
extern "C"
void initTexture(int width, int height, uint *hImage)
{
// copy image data to array
checkCudaErrors(hipMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
checkCudaErrors(hipMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
checkCudaErrors(hipMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
sizeof(uint)*width, height, hipMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
checkCudaErrors(hipFree(dImage));
checkCudaErrors(hipFree(dTemp));
}
/*
    Because a 2D gaussian mask is symmetric in its row and column directions,
    only a 1D mask is generated here; the product of the row and column
    entries is taken later.
    1D gaussian distribution :
        g(x, d) = C * exp(-x^2 / (2*d^2)), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
extern "C"
void updateGaussian(float delta, int radius)
{
float fGaussian[64];
for (int i = 0; i < 2*radius + 1; ++i)
{
float x = i-radius;
fGaussian[i] = expf(-(x*x) / (2*delta*delta));
}
checkCudaErrors(hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *dDest,
int width, int height,
float e_d, int radius, int iterations,
StopWatchInterface *timer)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>();
checkCudaErrors(hipBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
thrust::block_2d<uchar4> d_image_block(width,height);
d_image_block.upload((uchar4*)dImage,hipMemoryTypeDevice);
thrust::block_2d<uint> d_dest_block(width,height);
thrust::window_vector<uchar4> input_wv(&d_image_block,2*radius+1,2*radius+1,1,1);
thrust::window_vector<uint> output_wv(&d_dest_block,2*radius+1,2*radius+1,1,1);
for (int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&timer);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
thrust::transform(thrust::hip::shared,input_wv.begin(),input_wv.end(),output_wv.begin(),d_bilateral_filter_functor(e_d,radius));
// d_dest_block.download(dDest,hipMemoryTypeDevice);
// sync host and stop computation timer
checkCudaErrors(hipDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// checkCudaErrors(hipMemcpy2D(dTemp, pitch, d_dest_block.data_pointer, sizeof(int)*width,
// sizeof(int)*width, height, hipMemcpyDeviceToDevice));
// checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
}
}
return ((dKernelTime/1000.)/(double)iterations);
}
| 1b1ca885e7821ee41befb7e663871a1386cdd65f.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
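/*
    Sketch of the weight actually computed below: for centre pixel p and a neighbour q
    at offset (i, j) within the radius,
        w(p, q) = cGaussian[i + r] * cGaussian[j + r] * exp(-|I(p) - I(q)|^2 / (2 * e_d^2))
        output(p) = sum_q w(p, q) * I(q) / sum_q w(p, q)
*/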
//Euclidean Distance (x, y, d) = exp(-(|x - y| / d)^2 / 2)
__device__ float euclideanLen(float4 a, float4 b, float d)
{
float mod = (b.x - a.x) * (b.x - a.x) +
(b.y - a.y) * (b.y - a.y) +
(b.z - a.z) * (b.z - a.z);
return __expf(-mod / (2.f * d * d));
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0]
rgba.y = __saturatef(fabs(rgba.y));
rgba.z = __saturatef(fabs(rgba.z));
rgba.w = __saturatef(fabs(rgba.w));
return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od, int w, int h,
float e_d, int r)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= w || y >= h)
{
return;
}
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
float4 center = tex2D(rgbaTex, x, y);
for (int i = -r; i <= r; i++)
{
for (int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt(t/sum);
}
class d_bilateral_filter_functor
{
float e_d;
int r;
public:
d_bilateral_filter_functor(float e_d, int r)
{
this->e_d = e_d;
this->r = r;
}
__device__ int operator() ( const thrust::window_2d<uchar4> &input, const thrust::window_2d<uint> &output ) const
{
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
uchar4 center_int = input[r][r];
float4 center = {(float)center_int.x,(float)center_int.y,(float)center_int.z,(float)center_int.w};
for (int i = 0; i <= 2*r; i++)
{
for (int j = 0; j <= 2*r; j++)
{
uchar4 temp = input[j][i];
float4 curPix = {(float)temp.x,(float)temp.y,(float)temp.z,(float)temp.w,};
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
    output[r][r]=rgbaFloatToInt(t/sum);
return 1;
}
};
extern "C"
void initTexture(int width, int height, uint *hImage)
{
// copy image data to array
checkCudaErrors(cudaMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
checkCudaErrors(cudaMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
checkCudaErrors(cudaMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
sizeof(uint)*width, height, cudaMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
checkCudaErrors(cudaFree(dImage));
checkCudaErrors(cudaFree(dTemp));
}
/*
    Because a 2D gaussian mask is symmetric in its row and column directions,
    only a 1D mask is generated here; the product of the row and column
    entries is taken later.
    1D gaussian distribution :
        g(x, d) = C * exp(-x^2 / (2*d^2)), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
extern "C"
void updateGaussian(float delta, int radius)
{
float fGaussian[64];
for (int i = 0; i < 2*radius + 1; ++i)
{
float x = i-radius;
fGaussian[i] = expf(-(x*x) / (2*delta*delta));
}
checkCudaErrors(cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *dDest,
int width, int height,
float e_d, int radius, int iterations,
StopWatchInterface *timer)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
thrust::block_2d<uchar4> d_image_block(width,height);
d_image_block.upload((uchar4*)dImage,cudaMemoryTypeDevice);
thrust::block_2d<uint> d_dest_block(width,height);
thrust::window_vector<uchar4> input_wv(&d_image_block,2*radius+1,2*radius+1,1,1);
thrust::window_vector<uint> output_wv(&d_dest_block,2*radius+1,2*radius+1,1,1);
for (int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&timer);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
thrust::transform(thrust::cuda::shared,input_wv.begin(),input_wv.end(),output_wv.begin(),d_bilateral_filter_functor(e_d,radius));
// d_dest_block.download(dDest,cudaMemoryTypeDevice);
// sync host and stop computation timer
checkCudaErrors(cudaDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// checkCudaErrors(cudaMemcpy2D(dTemp, pitch, d_dest_block.data_pointer, sizeof(int)*width,
// sizeof(int)*width, height, cudaMemcpyDeviceToDevice));
// checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
}
}
return ((dKernelTime/1000.)/(double)iterations);
}
|
03c12860280ad5e928b099fd6fcac75e81402cc7.hip | // !!! This is a file automatically generated by hipify!!!
#include <SFML/Graphics.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "path_tracing.h"
#include "ray.h"
#include "hittable/sphere.h"
#include "hittable/hittable.h"
#include "hittable/rectangle.h"
#include "hittable/transformations.h"
#include "random.h"
#include "pdf.h"
#include "camera.h"
#include "material.h"
#include <float.h>
#include <iostream>
#include <chrono>
#define NTHREADS 128
#define MAX_DEPTH 15
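// Overview: init_common builds the Cornell-box scene and camera on the device,
// init_curand seeds one RNG state per pixel, and path_tracing_kernel accumulates
// Monte Carlo samples per pixel, drawing scatter directions from a mixture of the
// light-area PDF and a cosine-weighted PDF.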
__global__ void init_common(Hittable** dev_world, Hittable** light_shapes, Camera** dev_camera, int height, int width, hiprandState_t* states)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
Material* red = new Lambertian(new ConstantTexture({ 0.65, 0.05, 0.05 }));
Material* white = new Lambertian(new ConstantTexture({ 0.73, 0.73, 0.73 }));
Material* green = new Lambertian(new ConstantTexture({ 0.12, 0.45, 0.15 }));
Material* light = new DiffuseLight(new ConstantTexture({ 15, 15, 15 }));
int n = 8;
Hittable** list = new Hittable*[n];
list[0] = new YZRect(0, 555, 0, 555, 555, -1, green);
list[1] = new YZRect(0, 555, 0, 555, 0, 1, red);
list[2] = new XZRect(213, 343, 227, 332, 554, -1, light);
list[3] = new XZRect(0, 555, 0, 555, 555, -1, white);
list[4] = new XZRect(0, 555, 0, 555, 0, 1, white);
list[5] = new XYRect(0, 555, 0, 555, 555, -1, white);
list[6] = new Translate(new RotateY(new Box({ 0, 0, 0 }, { 165, 165, 165 }, white), -18), { 130,0,65 });
list[7] = new Translate(new RotateY(new Box({ 0, 0, 0 }, { 165, 330, 165 }, white), 15), { 265,0,295 });
*dev_world = new HittableList(list, n);
light_shapes[0] = new XZRect(213, 343, 227, 332, 554, 1, 0);
*dev_camera = new Camera({ 278, 278, -800 }, { 278,278,0 }, { 0, 1, 0 }, 40, float(width) / float(height));
}
}
__global__ void init_curand(hiprandState_t* states, int height, int width)
{
int size = height * width;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
hiprand_init(1234, idx, 0, &states[idx]);
}
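// color() evaluates the rendering equation recursively: each bounce returns
//   emitted + albedo * scattering_pdf(ray) / pdf_val * color(scattered ray)
// where the scattered direction is sampled from the mixture PDF and pdf_val is its
// density; recursion stops at MAX_DEPTH or when the ray misses the scene.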
__device__ vec3 color(Ray* ray, Hittable** dev_world, MixturePDF* pdf, int depth, HitRecord& rec, hiprandState_t* state)
{
if((*dev_world)->hit(ray, 0.001f, FLT_MAX, &rec))
{
Ray scattered;
vec3 emitted = rec.material->emitted(ray, &rec, rec.u, rec.v, rec.p);
vec3 albedo;
float pdf_val;
if (depth < MAX_DEPTH && rec.material->scatter(ray, &rec, &albedo, &scattered, pdf_val, state))
{
((HittablePDF*)pdf->p_0)->origin = rec.p;
((CosinePDF*)pdf->p_1)->uvw.build_from_w(rec.normal);
scattered.origin = rec.p;
scattered.direction = pdf->generate(state);
pdf_val = pdf->value(scattered.direction);
return emitted + albedo / pdf_val * rec.material->scattering_pdf(ray, &rec, &scattered) * color(&scattered, dev_world, pdf, depth + 1, rec, state);
}
return emitted;
}
return vec3(0.0f, 0.0f, 0.0f);
}
__global__ void path_tracing_kernel(Hittable** dev_world, Hittable** light_shapes, Camera** dev_camera,
vec3* framebuffer, unsigned char* pixels, int height, int width,
hiprandState_t* states, int rays_per_pixel, int total_rays_per_pixel)
{
int size = width * height;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= size) return;
int i = idx % width;
int j = idx / width;
int t = threadIdx.x;
__shared__ vec3 col[NTHREADS];
col[t] = { 0.0f, 0.0f, 0.0f };
__shared__ Ray ray[NTHREADS];
__shared__ hiprandState_t local_state[NTHREADS];
local_state[t] = states[idx];
__shared__ HitRecord rec[NTHREADS];
__shared__ PDF* p_0[NTHREADS];
__shared__ PDF* p_1[NTHREADS];
__shared__ MixturePDF* p[NTHREADS];
p_0[t] = new HittablePDF(light_shapes[0], vec3(0, 0, 0));
p_1[t] = new CosinePDF(vec3(0, 0, 0));
p[t] = new MixturePDF(p_0[t], p_1[t]);
//path tracing iterations
for (int s = 0; s < rays_per_pixel; s++)
{
float u = (float(i) + random_float(&local_state[t])) / float(width);
float v = (float(j) + random_float(&local_state[t])) / float(height);
ray[t] = (*dev_camera)->get_ray(u, v);
col[t] += de_nan(color(&ray[t], dev_world, p[t], 0, rec[t], &local_state[t]));
}
//calc color and put to global buffers
framebuffer[idx] += col[t];
col[t] = framebuffer[idx];
float n_rays = float(total_rays_per_pixel + rays_per_pixel);
int r = int(255.99f * sqrtf(col[t].X / n_rays));
if(r > 255) r = 255;
int g = int(255.99f * sqrtf(col[t].Y / n_rays));
if(g > 255) g = 255;
int b = int(255.99f * sqrtf(col[t].Z / n_rays));
if(b > 255) b = 255;
pixels[4 * ((height - 1 - j) * width + i)] = r;
pixels[4 * ((height - 1 - j) * width + i) + 1] = g;
pixels[4 * ((height - 1 - j) * width + i) + 2] = b;
//copy from shared to global memory
states[idx] = local_state[t];
delete p_0[t];
delete p_1[t];
delete p[t];
}
void path_tracing_with_cuda(std::string filename, int height, int width)
{
hipSetDevice(0);
//framebuffer
int size = width * height;
vec3* framebuffer = 0;
hipMalloc(&framebuffer, size * sizeof(vec3));
unsigned char* pixels = 0;
hipMallocManaged(&pixels, 4 * size * sizeof(unsigned char));
hipMemset(pixels, 255, 4 * size * sizeof(unsigned char));
//camera
Camera** dev_camera = 0;
hipMalloc(&dev_camera, sizeof(Camera**));
//hiprand
hiprandState_t* states = 0;
hipMalloc((void**)&states, size * sizeof(hiprandState_t));
hipLaunchKernelGGL(( init_curand), dim3(size / NTHREADS + 1), dim3(NTHREADS), 0, 0, states, height, width);
//world
Hittable** dev_world = 0;
hipMalloc(&dev_world, sizeof(Hittable**));
Hittable** light_shapes = 0;
hipMalloc(&light_shapes, sizeof(Hittable**));
hipLaunchKernelGGL(( init_common), dim3(1),dim3(1), 0, 0, dev_world, light_shapes, dev_camera, height, width, states);
//SFML
sf::RenderWindow window(sf::VideoMode(width, height), "path tracing");
sf::Texture texture;
texture.create(width, height);
sf::Sprite sprite(texture);
//tracing
int rays_per_pixel = 100;
int total_rays_per_pixel = 0;
while(window.isOpen() && total_rays_per_pixel < 1000)
{
sf::Event event;
while(window.pollEvent(event))
{
if(event.type == sf::Event::Closed)
window.close();
}
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( path_tracing_kernel), dim3(size / NTHREADS + 1), dim3(NTHREADS), 0, 0, dev_world, light_shapes, dev_camera, framebuffer, pixels, height, width, states, rays_per_pixel, total_rays_per_pixel);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << diff << " ms passed; " << float(width * height * rays_per_pixel) / float(diff * 1000) << " Mrays/s" << std::endl;
total_rays_per_pixel += rays_per_pixel;
texture.update((sf::Uint8*)pixels);
window.clear();
window.draw(sprite);
window.display();
}
sf::Image final_pic;
final_pic.create(width, height, (sf::Uint8*)pixels);
final_pic.saveToFile(filename);
hipFree(framebuffer);
hipFree(dev_world);
hipFree(dev_camera);
hipFree(pixels);
}
| 03c12860280ad5e928b099fd6fcac75e81402cc7.cu | #include <SFML/Graphics.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "path_tracing.h"
#include "ray.h"
#include "hittable/sphere.h"
#include "hittable/hittable.h"
#include "hittable/rectangle.h"
#include "hittable/transformations.h"
#include "random.h"
#include "pdf.h"
#include "camera.h"
#include "material.h"
#include <float.h>
#include <iostream>
#include <chrono>
#define NTHREADS 128
#define MAX_DEPTH 15
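// Overview: init_common builds the Cornell-box scene and camera on the device,
// init_curand seeds one RNG state per pixel, and path_tracing_kernel accumulates
// Monte Carlo samples per pixel, drawing scatter directions from a mixture of the
// light-area PDF and a cosine-weighted PDF.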
__global__ void init_common(Hittable** dev_world, Hittable** light_shapes, Camera** dev_camera, int height, int width, curandState_t* states)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
Material* red = new Lambertian(new ConstantTexture({ 0.65, 0.05, 0.05 }));
Material* white = new Lambertian(new ConstantTexture({ 0.73, 0.73, 0.73 }));
Material* green = new Lambertian(new ConstantTexture({ 0.12, 0.45, 0.15 }));
Material* light = new DiffuseLight(new ConstantTexture({ 15, 15, 15 }));
int n = 8;
Hittable** list = new Hittable*[n];
list[0] = new YZRect(0, 555, 0, 555, 555, -1, green);
list[1] = new YZRect(0, 555, 0, 555, 0, 1, red);
list[2] = new XZRect(213, 343, 227, 332, 554, -1, light);
list[3] = new XZRect(0, 555, 0, 555, 555, -1, white);
list[4] = new XZRect(0, 555, 0, 555, 0, 1, white);
list[5] = new XYRect(0, 555, 0, 555, 555, -1, white);
list[6] = new Translate(new RotateY(new Box({ 0, 0, 0 }, { 165, 165, 165 }, white), -18), { 130,0,65 });
list[7] = new Translate(new RotateY(new Box({ 0, 0, 0 }, { 165, 330, 165 }, white), 15), { 265,0,295 });
*dev_world = new HittableList(list, n);
light_shapes[0] = new XZRect(213, 343, 227, 332, 554, 1, 0);
*dev_camera = new Camera({ 278, 278, -800 }, { 278,278,0 }, { 0, 1, 0 }, 40, float(width) / float(height));
}
}
__global__ void init_curand(curandState_t* states, int height, int width)
{
int size = height * width;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
curand_init(1234, idx, 0, &states[idx]);
}
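// color() evaluates the rendering equation recursively: each bounce returns
//   emitted + albedo * scattering_pdf(ray) / pdf_val * color(scattered ray)
// where the scattered direction is sampled from the mixture PDF and pdf_val is its
// density; recursion stops at MAX_DEPTH or when the ray misses the scene.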
__device__ vec3 color(Ray* ray, Hittable** dev_world, MixturePDF* pdf, int depth, HitRecord& rec, curandState_t* state)
{
if((*dev_world)->hit(ray, 0.001f, FLT_MAX, &rec))
{
Ray scattered;
vec3 emitted = rec.material->emitted(ray, &rec, rec.u, rec.v, rec.p);
vec3 albedo;
float pdf_val;
if (depth < MAX_DEPTH && rec.material->scatter(ray, &rec, &albedo, &scattered, pdf_val, state))
{
((HittablePDF*)pdf->p_0)->origin = rec.p;
((CosinePDF*)pdf->p_1)->uvw.build_from_w(rec.normal);
scattered.origin = rec.p;
scattered.direction = pdf->generate(state);
pdf_val = pdf->value(scattered.direction);
return emitted + albedo / pdf_val * rec.material->scattering_pdf(ray, &rec, &scattered) * color(&scattered, dev_world, pdf, depth + 1, rec, state);
}
return emitted;
}
return vec3(0.0f, 0.0f, 0.0f);
}
__global__ void path_tracing_kernel(Hittable** dev_world, Hittable** light_shapes, Camera** dev_camera,
vec3* framebuffer, unsigned char* pixels, int height, int width,
curandState_t* states, int rays_per_pixel, int total_rays_per_pixel)
{
int size = width * height;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= size) return;
int i = idx % width;
int j = idx / width;
int t = threadIdx.x;
__shared__ vec3 col[NTHREADS];
col[t] = { 0.0f, 0.0f, 0.0f };
__shared__ Ray ray[NTHREADS];
__shared__ curandState_t local_state[NTHREADS];
local_state[t] = states[idx];
__shared__ HitRecord rec[NTHREADS];
__shared__ PDF* p_0[NTHREADS];
__shared__ PDF* p_1[NTHREADS];
__shared__ MixturePDF* p[NTHREADS];
p_0[t] = new HittablePDF(light_shapes[0], vec3(0, 0, 0));
p_1[t] = new CosinePDF(vec3(0, 0, 0));
p[t] = new MixturePDF(p_0[t], p_1[t]);
//path tracing iterations
for (int s = 0; s < rays_per_pixel; s++)
{
float u = (float(i) + random_float(&local_state[t])) / float(width);
float v = (float(j) + random_float(&local_state[t])) / float(height);
ray[t] = (*dev_camera)->get_ray(u, v);
col[t] += de_nan(color(&ray[t], dev_world, p[t], 0, rec[t], &local_state[t]));
}
//calc color and put to global buffers
framebuffer[idx] += col[t];
col[t] = framebuffer[idx];
float n_rays = float(total_rays_per_pixel + rays_per_pixel);
int r = int(255.99f * sqrtf(col[t].X / n_rays));
if(r > 255) r = 255;
int g = int(255.99f * sqrtf(col[t].Y / n_rays));
if(g > 255) g = 255;
int b = int(255.99f * sqrtf(col[t].Z / n_rays));
if(b > 255) b = 255;
pixels[4 * ((height - 1 - j) * width + i)] = r;
pixels[4 * ((height - 1 - j) * width + i) + 1] = g;
pixels[4 * ((height - 1 - j) * width + i) + 2] = b;
//copy from shared to global memory
states[idx] = local_state[t];
delete p_0[t];
delete p_1[t];
delete p[t];
}
void path_tracing_with_cuda(std::string filename, int height, int width)
{
cudaSetDevice(0);
//framebuffer
int size = width * height;
vec3* framebuffer = 0;
cudaMalloc(&framebuffer, size * sizeof(vec3));
unsigned char* pixels = 0;
cudaMallocManaged(&pixels, 4 * size * sizeof(unsigned char));
cudaMemset(pixels, 255, 4 * size * sizeof(unsigned char));
//camera
Camera** dev_camera = 0;
cudaMalloc(&dev_camera, sizeof(Camera**));
//curand
curandState_t* states = 0;
cudaMalloc((void**)&states, size * sizeof(curandState_t));
init_curand<<<size / NTHREADS + 1, NTHREADS>>>(states, height, width);
//world
Hittable** dev_world = 0;
cudaMalloc(&dev_world, sizeof(Hittable**));
Hittable** light_shapes = 0;
cudaMalloc(&light_shapes, sizeof(Hittable**));
init_common<<<1,1>>>(dev_world, light_shapes, dev_camera, height, width, states);
//SFML
sf::RenderWindow window(sf::VideoMode(width, height), "path tracing");
sf::Texture texture;
texture.create(width, height);
sf::Sprite sprite(texture);
//tracing
int rays_per_pixel = 100;
int total_rays_per_pixel = 0;
while(window.isOpen() && total_rays_per_pixel < 1000)
{
sf::Event event;
while(window.pollEvent(event))
{
if(event.type == sf::Event::Closed)
window.close();
}
auto start = std::chrono::steady_clock::now();
path_tracing_kernel<<<size / NTHREADS + 1, NTHREADS>>>(dev_world, light_shapes, dev_camera, framebuffer, pixels, height, width, states, rays_per_pixel, total_rays_per_pixel);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << diff << " ms passed; " << float(width * height * rays_per_pixel) / float(diff * 1000) << " Mrays/s" << std::endl;
total_rays_per_pixel += rays_per_pixel;
texture.update((sf::Uint8*)pixels);
window.clear();
window.draw(sprite);
window.display();
}
sf::Image final_pic;
final_pic.create(width, height, (sf::Uint8*)pixels);
final_pic.saveToFile(filename);
cudaFree(framebuffer);
cudaFree(dev_world);
cudaFree(dev_camera);
cudaFree(pixels);
}
|
18adbf276fbd1970d8a4b0afbc167e2943b515b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include <ctype.h>
#include "genedist.h"
#include "hip/hip_runtime.h"
char** nameList;
int geneCount = 0;
//Determines if the program will run for all genes or a single one
unsigned char singleGeneCalculation = 0;
//May be unable to use CUDA due to GPU memory unavilability
unsigned char canUseCuda;
//User provided parameter, number of results that will be saved for each gene
int numberOfResults = 10;
//User specified gene, set by command line argument (optional)
char selectedGene[MAX_GENE_NAME_SIZE] = "default";
//User specified plates, set by command line argument (optional)
int* selectedPlates = NULL;
int numSelectedPlates = -1;
//User specified threshold values, set by command line argument(optional)
//Default is -1 which indicates value was not set by user and no threshold
//PPIB will be used (all genes will be included in the calculations)
int highPPIB = -1;
int lowPPIB = -1;
//User specified output file save location
//DEFAULT: "output/"
char outputLocation[MAX_FILE_PATH_SIZE] = "output";
//User specified input file location
char inputLocation[MAX_FILE_PATH_SIZE] = "none";
//Texture memory will be used to store gene information
texture<int2, 1, hipReadModeElementType> geneTex;
//User specified calculation variant
calculationVariant calcVariant = STANDARD;
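// calculateDistanceGPU: each 32-thread block stages 32 genes at a time in shared
// memory and fills a 32x32 tile of the all-pairs distance matrix. For the STANDARD
// variant the distance between two genes is the sum, over the three replicates, of
// the Euclidean distances between their 6-probe vectors; AVERAGE_FIRST_DISTANCE
// compares each replicate of the other gene against this gene's averaged replicate.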
__global__ void calculateDistanceGPU(double* distance_d, int geneCount, calculationVariant calcVariant) {
__shared__ double s_genes[32 * DISTANCES_PER_GENE];
__shared__ double results[1024];
double dist = 0.0;
int geneIndex = blockIdx.x * blockDim.x + threadIdx.x;
	//Load this thread's own gene (these scattered texture fetches are costly but unavoidable while the gene list is stored in texture memory)
double curr_gene[DISTANCES_PER_GENE];
double avgReplicateOne[6];
if(geneIndex < geneCount) {
for(int i = 0; i < DISTANCES_PER_GENE; i++) {
int2 v = tex1Dfetch(geneTex, geneIndex * DISTANCES_PER_GENE + i);
curr_gene[i] = __hiloint2double(v.y, v.x);
}
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((curr_gene[0] + curr_gene[6] + curr_gene[12])/3);
avgReplicateOne[1] = ((curr_gene[1] + curr_gene[7] + curr_gene[13])/3);
avgReplicateOne[2] = ((curr_gene[2] + curr_gene[8] + curr_gene[14])/3);
avgReplicateOne[3] = ((curr_gene[3] + curr_gene[9] + curr_gene[15])/3);
avgReplicateOne[4] = ((curr_gene[4] + curr_gene[10] + curr_gene[16])/3);
avgReplicateOne[5] = ((curr_gene[5] + curr_gene[11] + curr_gene[17])/3);
}
int top = (geneCount % 32 == 0) ? geneCount/32 : geneCount/32 + 1;
for(int i = 0; i < top; i++) {
//Fill the shared input array collaboratively
for(int j = 0; j < DISTANCES_PER_GENE; j++) {
int2 v = tex1Dfetch(geneTex, (i * 32 * DISTANCES_PER_GENE) + (threadIdx.x * DISTANCES_PER_GENE) + j);
s_genes[threadIdx.x * DISTANCES_PER_GENE + j] = __hiloint2double(v.y, v.x);
}
for(int j = 0; j < 32; j++) {
int offset = DISTANCES_PER_GENE * j;
if(calcVariant == STANDARD) {
dist = __dsqrt_rz(pow(s_genes[0 + offset] - curr_gene[0],2) + pow(s_genes[1 + offset] - curr_gene[1],2) +
pow(s_genes[2 + offset] - curr_gene[2],2) + pow(s_genes[3 + offset] - curr_gene[3],2) +
pow(s_genes[4 + offset] - curr_gene[4],2) + pow(s_genes[5 + offset] - curr_gene[5],2)) +
__dsqrt_rz(pow(s_genes[6 + offset] - curr_gene[6],2) + pow(s_genes[7 + offset] - curr_gene[7],2) +
pow(s_genes[8 + offset] - curr_gene[8],2) + pow(s_genes[9 + offset] - curr_gene[9],2) +
pow(s_genes[10 + offset] - curr_gene[10],2) + pow(s_genes[11 + offset] - curr_gene[11],2))+
__dsqrt_rz(pow(s_genes[12 + offset] - curr_gene[12],2) + pow(s_genes[13 + offset] - curr_gene[13],2) +
pow(s_genes[14 + offset] - curr_gene[14],2) + pow(s_genes[15 + offset] - curr_gene[15],2) +
pow(s_genes[16 + offset] - curr_gene[16],2) + pow(s_genes[17 + offset] - curr_gene[17],2));
}
else if (calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = __dsqrt_rz(pow(s_genes[0 + offset] - avgReplicateOne[0],2) + pow(s_genes[1 + offset] - avgReplicateOne[1],2) +
pow(s_genes[2 + offset] - avgReplicateOne[2],2) + pow(s_genes[3 + offset] - avgReplicateOne[3],2) +
pow(s_genes[4 + offset] - avgReplicateOne[4],2) + pow(s_genes[5 + offset] - avgReplicateOne[5],2)) +
__dsqrt_rz(pow(s_genes[6 + offset] - avgReplicateOne[0],2) + pow(s_genes[7 + offset] - avgReplicateOne[1],2) +
pow(s_genes[8 + offset] - avgReplicateOne[2],2) + pow(s_genes[9 + offset] - avgReplicateOne[3],2) +
pow(s_genes[10 + offset] - avgReplicateOne[4],2) + pow(s_genes[11 + offset] - avgReplicateOne[5],2))+
                        __dsqrt_rz(pow(s_genes[12 + offset] - avgReplicateOne[0],2) + pow(s_genes[13 + offset] - avgReplicateOne[1],2) +
pow(s_genes[14 + offset] - avgReplicateOne[2],2) + pow(s_genes[15 + offset] - avgReplicateOne[3],2) +
pow(s_genes[16 + offset] - avgReplicateOne[4],2) + pow(s_genes[17 + offset] - avgReplicateOne[5],2));
}
results[(threadIdx.x * 32) + j] = dist;
}
for(int j = 0; j < 32; j++) {
int sharedoffset = j* 32;
int globaloffset = (blockIdx.x * 32 * geneCount) + (j * geneCount) + (32 * i);
if(threadIdx.x + globaloffset < geneCount * geneCount) {
distance_d[threadIdx.x + globaloffset ] = results[threadIdx.x + sharedoffset];
}
}
}
}
}
void parseArguments(int argc, char** argv) {
if(argc > 1) {
//Will need to loop through all command line arguments
for(int i = 1; i < argc; i+=2) {
if (strcmp(argv[i], "-i") == 0 || strcmp(argv[i], "--input") == 0) {
strcpy(inputLocation, argv[i+1]);
} else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
usage();
exit(0);
} else if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--calc") == 0) {
//Convert string to upper case
char str[MAX_FILE_PATH_SIZE] = "temp";
strcpy(str, argv[i+1]);
int i = 0;
while(i < strlen(str)) {
str[i] = (char)toupper(str[i]);
i++;
}
if(strcmp(str, "STANDARD") == 0) {
calcVariant = STANDARD;
} else if (strcmp(str, "AVERAGE_FIRST_DISTANCE") == 0) {
calcVariant = AVERAGE_FIRST_DISTANCE;
} else {
printf("Warning: No such calculation variant as %s. Please see usage below for a list of acceptable variants.\n", str);
usage();
exit(1);
}
} else if (strcmp(argv[i], "-r") == 0 || strcmp(argv[i], "--results") == 0) {
numberOfResults = atoi(argv[i+1]);
//atoi() returns 0 when no valid conversion takes place, need to make sure numberOfResults is at least 1
if(!(numberOfResults > 0)) {
printf("Error: number of results must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-g") == 0 || strcmp(argv[i], "--gene") == 0) {
strcpy(selectedGene,argv[i+1]);
singleGeneCalculation = 1;
} else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--plates") == 0) {
//Format will be numbers separated by commas (i.e. 123,233,11,22)
//First malloc some space in the selectedPlates array
                //Because size is not known at compile time, make the array as big as the number of characters in the string
//This is obviously overkill but will always have enough room for any arbitrary number of plates
selectedPlates = (int*) malloc(strlen(argv[i+1]) * sizeof(int));
//If there is only one plate selected, don't need to tokenize the string
if(strlen(argv[i+1]) == 1) {
numSelectedPlates = 1;
selectedPlates[0] = atoi(argv[i+1]);
} else {
char* tok = strtok(argv[i+1], ",");
numSelectedPlates = 0;
while(tok != NULL) {
selectedPlates[numSelectedPlates] = atoi(tok);
tok = strtok(NULL, ",");
numSelectedPlates++;
}
}
} else if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--highPPIB") == 0) {
highPPIB = atoi(argv[i+1]);
                //atoi() returns 0 when no valid conversion takes place, need to make sure threshold PPIB is at least 1
if(!(highPPIB > 0)) {
printf("Error: High threshold PPIB must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-l") == 0 || strcmp(argv[i], "--lowPPIB") == 0) {
lowPPIB = atoi(argv[i+1]);
                //atoi() returns 0 when no valid conversion takes place, need to make sure threshold PPIB is at least 1
if(!(lowPPIB > 0)) {
printf("Error: Low threshold PPIB must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-o") == 0 || strcmp(argv[i], "--output") == 0 ){
strcpy(outputLocation, argv[i+1]);
/*********************************************************
*
* Not sure if this is necessary, simply adds a trailing '/'
* Maybe different for windows vs. posix machines as well
*
**********************************************************/
//if(outputLocation[strlen(outputLocation) - 1] != '/') {
// outputLocation[strlen(outputLocation)] = '/';
//}
} else {
printf("Warning: %s is an invalid command line argument\n. See below for usage.\n\n", argv[i]);
usage();
exit(1);
}
}
} else {
printf("No command line arguments specified. Please see usage below.\n\n");
usage();
exit(1);
}
}
int main(int argc, char** argv) {
double* geneList_h, *geneList_d;
double* distance_d, *distance_h;
//Only holds data if calculating all results and GPU is unavailable
DistanceTuple** distanceMatrix = NULL;
distance_h = NULL;
parseArguments(argc, argv);
if(strcmp(inputLocation, "none") == 0) {
printf("Warning: must provide an input file.\n\n");
usage();
exit(1);
}
FILE* inputFile;
if(!(inputFile = fopen(inputLocation, "r"))) {
//File does not exist
printf("Error: unable to open input file %s\n", inputLocation);
exit(1);
}
//Allocate memory for the gene list on the host
geneList_h = (double *) malloc(MAX_GENES * sizeof(double) * DISTANCES_PER_GENE);
//Allocate memory for the name list on the host
nameList = (char **) malloc(MAX_GENES * sizeof(char*));
for(int i = 0; i < MAX_GENES; i++) {
nameList[i] = (char *) malloc(MAX_GENE_NAME_SIZE * sizeof(char));
}
//File exists so continue
createGeneListFromFile(inputFile, geneList_h);
//Close input file
fclose(inputFile);
printf("Read file successfully.\n");
if(geneCount == 1) {
printf("Only one gene found that meets specified criteria. Please provide new criteria to expand number of eligible genes.\n");
exit(0);
}
if(geneCount < 11) {
numberOfResults = geneCount - 2;
}
if(numberOfResults > geneCount - 1) {
printf("Error: number of results requested exceeds maximum allowable number.\n");
printf("Number of genes: %d, maximum number of results per gene: %d\n", geneCount, geneCount-1);
printf("Number of results will be set to maximum. All results will be saved.\n");
numberOfResults = geneCount - 1;
}
int geneListSize = geneCount * DISTANCES_PER_GENE * sizeof(double);
printf("Number of genes: %d\n", geneCount);
if(!singleGeneCalculation) {
//Get memory specifications of the GPU
getCardSpecs();
}
if(geneCount < CUDA_CUTOFF) {
canUseCuda = 0;
}
//Launch the CUDA portion of the code if calculating distances for all genes
printf("CUDA status (0 - not using, 1 - using): %d\n", canUseCuda);
if(!canUseCuda) {
printf("Program will continue in serial mode...\n");
}
if(!singleGeneCalculation && canUseCuda) {
hipError_t error;
//There will be n^2 results from n genes
long long resultsSize = geneCount * geneCount * sizeof(double);
dim3 blockSize = 32;
dim3 gridSize = 0;
gridSize = (geneCount % 32 == 0) ? geneCount/32 : geneCount/32 + 1;
error = hipMalloc((void**) &distance_d, resultsSize);
checkCudaError(error, "Malloc for device side distance array");
//Allocate space on the host for the distance results
distance_h = (double*) malloc(resultsSize);
//Allocate memory on the device for the genelist
error = hipMalloc((void**) &geneList_d, geneListSize);
checkCudaError(error, "Malloc for device side gene list array");
//Copy the gene list to the device
error = hipMemcpy(geneList_d, geneList_h, geneListSize, hipMemcpyHostToDevice);
checkCudaError(error, "Copy genelist from host to device");
//Bind geneList to texture memory
error = hipBindTexture(NULL, geneTex, geneList_d, geneListSize);
checkCudaError(error, "Bind genelist to texture memory");
hipLaunchKernelGGL(( calculateDistanceGPU), dim3(gridSize),dim3(blockSize), 0, 0, distance_d, geneCount, calcVariant);
error = hipGetLastError();
//If there is an error in the kernel, exit the program because results will be invalid
if(error) {
printf("Progrm experienced error in kernel call and will now exit.\n");
exit(1);
}
//Get results back from the kernel
error = hipMemcpy(distance_h, distance_d, resultsSize, hipMemcpyDeviceToHost);
checkCudaError(error, "Copy results from Device to Host");
error = hipUnbindTexture(geneTex);
checkCudaError(error, "Unbind Texture");
error = hipFree(geneList_d);
checkCudaError(error, "Free genelist");
error = hipFree(distance_d);
checkCudaError(error, "Free distance_d");
error = hipDeviceReset();
checkCudaError(error, "Device Reset");
} else if(!singleGeneCalculation && !canUseCuda) {
//Allocate memory for the distance matrix
distanceMatrix = (DistanceTuple**) malloc(geneCount * sizeof(DistanceTuple*));
for(int i = geneCount - 1; i >= 0; i--) {
distanceMatrix[geneCount - i - 1] = (DistanceTuple*) malloc(i * sizeof(DistanceTuple));
}
calculateDistanceCPU(geneList_h, distanceMatrix);
} else if (singleGeneCalculation){
distance_h = (double *) malloc(geneCount * sizeof(double));
calculateSingleDistance(selectedGene, geneList_h, distance_h);
}
sortAndPrint(geneList_h, distance_h, distanceMatrix);
/**************
| CLEANUP
**************/
free(geneList_h);
if((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) {
free(distance_h);
}
if(!singleGeneCalculation && !canUseCuda) {
for(int i = 0; i < geneCount - 1; i++) {
free(distanceMatrix[i]);
}
free(distanceMatrix);
}
printf("Done! Results have been stored in the \"%s\" folder.\n", outputLocation);
return 0;
}
bool checkCudaError(hipError_t error, char* operation) {
if(strcmp(hipGetErrorString(error), "no error") != 0) {
printf("Warning: the following CUDA error occurred: %s\n", hipGetErrorString(error));
printf("This error occurred during the following operation: %s\n\n", operation);
return 1;
}
return 0;
}
void getCardSpecs() {
int devCount;
hipGetDeviceCount(&devCount);
size_t freemem, totmem;
hipMemGetInfo(&freemem, &totmem);
printf("Total mem: %d\n", totmem);
printf("Free mem: %d\n", freemem);
hipDeviceProp_t props;
hipGetDeviceProperties(&props, 0);
long long geneListSize = DISTANCES_PER_GENE * REPLICATES_PER_GENE * geneCount * sizeof(double);
long long resultsSize = geneCount * geneCount * sizeof(double);
if(props.totalGlobalMem == 0) {
printf("Warning: No CUDA card detected.\n");
canUseCuda = 0;
return;
} else if(props.totalGlobalMem < geneListSize + resultsSize) {
printf("Warning: CUDA card has insufficient total memory to run for this input size.\n");
printf("Global memory (MB): %d\n", props.totalGlobalMem/(1024 * 1024));
canUseCuda = 0;
return;
} else if(props.major < 2) {
printf("Warning: CUDA card compute capability is too low. Expected compute capability is 2.0 or greater. This card has a compute capability of %d.%d\n", props.major, props.minor);
canUseCuda = 0;
return;
}
canUseCuda = 1;
}
void sortAndPrint(double* geneList, double* distance, DistanceTuple** distanceMatrix) {
//Need to reconstruct a templist from the distance matrix for each gene
DistanceTuple* tempDistanceList = (DistanceTuple*) malloc(geneCount * sizeof(DistanceTuple));
int top = (!singleGeneCalculation) ? geneCount : 1;
for(int i = 0; i < top; i++) {
if((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) {
for(int j = 0; j < geneCount; j++) {
tempDistanceList[j].distance = distance[(i*geneCount) + j];
tempDistanceList[j].geneOne = j;
}
} else if (!singleGeneCalculation && !canUseCuda) {
int row, col, distanceIndex;
distanceIndex = 0;
row = i - 1;
col = 0;
//Load entries from the diagonal
while(row >= 0 && col <= i - 1) {
tempDistanceList[distanceIndex] = distanceMatrix[row][col];
distanceIndex++;
row--;
col++;
}
//Load remaining entries from the gene's column
for(int j = 0; j < geneCount - i - 1; j++) {
tempDistanceList[distanceIndex] = distanceMatrix[i][j];
tempDistanceList[distanceIndex].geneOne = tempDistanceList[distanceIndex].geneTwo;
distanceIndex++;
}
}
int listSize = ( (!singleGeneCalculation && canUseCuda)|| singleGeneCalculation) ? geneCount : geneCount - 1;
qsort (tempDistanceList, listSize, sizeof(DistanceTuple), compareDistanceTuple);
char fname[MAX_FILE_PATH_SIZE + MAX_GENE_NAME_SIZE];
strcpy(fname, outputLocation);
if(singleGeneCalculation) {
strcat(fname, selectedGene);
} else {
strcat(fname, nameList[i]);
}
strcat(fname, ".txt");
//Write results to file
FILE * outfile = fopen(fname, "w");
if(!outfile) {
printf("Warning: Output folder does not exist. Please create the output folder before running the program.\n");
exit(1);
}
int startIndex = ((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) ? 1 : 0;
for(int j = startIndex; j < numberOfResults + startIndex; j++) {
if(strcmp(nameList[tempDistanceList[j].geneOne], nameList[i]) != 0) {
fprintf(outfile, "%d: %s %.15f\n", startIndex ? j : j+1, nameList[tempDistanceList[j].geneOne], tempDistanceList[j].distance);
}
}
fclose(outfile);
}
}
void calculateSingleDistance(char* gene, double* geneList, double* distanceList) {
bool foundGene = false;
int currGeneIndex;
for(int i = 0; i < geneCount; i++) {
if(strcmp(nameList[i],gene) == 0) {
foundGene = true;
currGeneIndex = i;
printf("Found gene %s, now calculating distances.\n", gene);
break;
}
}
if(!foundGene) {
printf("Error: unable to find specified gene: %s. \n", gene);
exit(1);
}
double dist = 0.0;
int currOffset = currGeneIndex * DISTANCES_PER_GENE;
double avgReplicateOne[6];
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((geneList[0 + currOffset] + geneList[6 + currOffset] + geneList[12 + currOffset])/3);
avgReplicateOne[1] = ((geneList[1 + currOffset] + geneList[7 + currOffset] + geneList[13 + currOffset])/3);
avgReplicateOne[2] = ((geneList[2 + currOffset] + geneList[8 + currOffset] + geneList[14 + currOffset])/3);
avgReplicateOne[3] = ((geneList[3 + currOffset] + geneList[9 + currOffset] + geneList[15 + currOffset])/3);
avgReplicateOne[4] = ((geneList[4 + currOffset] + geneList[10 + currOffset] + geneList[16 + currOffset])/3);
avgReplicateOne[5] = ((geneList[5 + currOffset] + geneList[11 + currOffset] + geneList[17 + currOffset])/3);
}
for(int i = 0; i < geneCount; i++) {
int tmpOffset = i * DISTANCES_PER_GENE;
if(calcVariant == STANDARD) {
dist = sqrt(pow(geneList[0 + tmpOffset] - geneList[0 + currOffset],2) + pow(geneList[1 + tmpOffset] - geneList[1 + currOffset],2) +
pow(geneList[2 + tmpOffset] - geneList[2 + currOffset],2) + pow(geneList[3 + tmpOffset] - geneList[3 + currOffset],2) +
pow(geneList[4 + tmpOffset] - geneList[4 + currOffset],2) + pow(geneList[5 + tmpOffset] - geneList[5 + currOffset],2))
+ sqrt(pow(geneList[6 + tmpOffset] - geneList[6 + currOffset],2) + pow(geneList[7 + tmpOffset] - geneList[7 + currOffset],2) +
pow(geneList[8 + tmpOffset] - geneList[8 + currOffset],2) + pow(geneList[9 + tmpOffset] - geneList[9 + currOffset],2) +
pow(geneList[10 + tmpOffset] - geneList[10 + currOffset],2) + pow(geneList[11 + tmpOffset] - geneList[11 + currOffset],2))
+ sqrt(pow(geneList[12 + tmpOffset] - geneList[12 + currOffset],2) + pow(geneList[13 + tmpOffset] - geneList[13 + currOffset],2) +
pow(geneList[14 + tmpOffset] - geneList[14 + currOffset],2) + pow(geneList[15 + tmpOffset] - geneList[15 + currOffset],2) +
pow(geneList[16 + tmpOffset] - geneList[16 + currOffset],2) + pow(geneList[17 + tmpOffset] - geneList[17 + currOffset],2));
} else if(calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = sqrt(pow(geneList[0 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[1 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[2 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[3 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[4 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[5 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[6 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[7 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[8 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[9 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[10 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[11 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[12 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[13 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[14 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[15 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[16 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[17 + tmpOffset] - avgReplicateOne[5],2));
}
distanceList[i] = dist;
}
}
void trim(char *str) {
char *p1 = str, *p2 = str;
do
while (*p2 == ' ' || *p2 == '/')
p2++;
while (*p1++ = *p2++);
}
void calculateDistanceCPU(double* geneList, DistanceTuple** distanceMatrix) {
double dist = 0.0;
int distColIndex, currOffset, tmpOffset, currGeneIndex;
for(int i = 0; i < geneCount; i++) {
currGeneIndex = i;
currOffset = currGeneIndex * DISTANCES_PER_GENE;
double avgReplicateOne[6];
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((geneList[0 + currOffset] + geneList[6 + currOffset] + geneList[12 + currOffset])/3);
avgReplicateOne[1] = ((geneList[1 + currOffset] + geneList[7 + currOffset] + geneList[13 + currOffset])/3);
avgReplicateOne[2] = ((geneList[2 + currOffset] + geneList[8 + currOffset] + geneList[14 + currOffset])/3);
avgReplicateOne[3] = ((geneList[3 + currOffset] + geneList[9 + currOffset] + geneList[15 + currOffset])/3);
avgReplicateOne[4] = ((geneList[4 + currOffset] + geneList[10 + currOffset] + geneList[16 + currOffset])/3);
avgReplicateOne[5] = ((geneList[5 + currOffset] + geneList[11 + currOffset] + geneList[17 + currOffset])/3);
}
distColIndex = 0;
for(int j = i+1; j < geneCount; j++) {
tmpOffset = j * DISTANCES_PER_GENE;
if(calcVariant == STANDARD) {
dist = sqrt(pow(geneList[0 + tmpOffset] - geneList[0 + currOffset],2) + pow(geneList[1 + tmpOffset] - geneList[1 + currOffset],2) +
pow(geneList[2 + tmpOffset] - geneList[2 + currOffset],2) + pow(geneList[3 + tmpOffset] - geneList[3 + currOffset],2) +
pow(geneList[4 + tmpOffset] - geneList[4 + currOffset],2) + pow(geneList[5 + tmpOffset] - geneList[5 + currOffset],2))
+ sqrt(pow(geneList[6 + tmpOffset] - geneList[6 + currOffset],2) + pow(geneList[7 + tmpOffset] - geneList[7 + currOffset],2) +
pow(geneList[8 + tmpOffset] - geneList[8 + currOffset],2) + pow(geneList[9 + tmpOffset] - geneList[9 + currOffset],2) +
pow(geneList[10 + tmpOffset] - geneList[10 + currOffset],2) + pow(geneList[11 + tmpOffset] - geneList[11 + currOffset],2))
+ sqrt(pow(geneList[12 + tmpOffset] - geneList[12 + currOffset],2) + pow(geneList[13 + tmpOffset] - geneList[13 + currOffset],2) +
pow(geneList[14 + tmpOffset] - geneList[14 + currOffset],2) + pow(geneList[15 + tmpOffset] - geneList[15 + currOffset],2) +
pow(geneList[16 + tmpOffset] - geneList[16 + currOffset],2) + pow(geneList[17 + tmpOffset] - geneList[17 + currOffset],2));
} else if (calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = sqrt(pow(geneList[0 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[1 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[2 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[3 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[4 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[5 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[6 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[7 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[8 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[9 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[10 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[11 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[12 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[13 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[14 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[15 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[16 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[17 + tmpOffset] - avgReplicateOne[5],2));
}
distanceMatrix[i][distColIndex].geneOne = i;
distanceMatrix[i][distColIndex].geneTwo = j;
distanceMatrix[i][distColIndex].distance = dist;
distColIndex++;
}
}
}
int compareDistanceTuple (const void * a, const void * b) {
DistanceTuple *ia = (DistanceTuple*)a;
DistanceTuple *ib = (DistanceTuple*)b;
double diff = ia->distance - ib->distance;
if(diff < 0) {
return -1;
} else if (diff == 0.0f) {
return 0;
} else {
return 1;
}
}
void usage(void) {
printf("User MUST provide an input file path\n\n");
printf("\t-c, --calc: specify the calculation variant to be run. Valid options are as follows:\n");
printf("\t\tSTANDARD - dist(r1,s1) + dist(r2,s2) + dist(r3,s3)\n");
printf("\t\tAVERAGE_FIRST_DISTANCE - dist(avg(r1,r2,r3), s1) + dist(avg(r1,r2,r3), s2) + dist(avg(r1,r2,r3), s3)\n\n");
printf("\t-g, --gene: specify a specific gene to generate results for\n\n");
printf("\t-h, --help: displays this usage message\n\n");
printf("\t-i, --input: specify the input file path\n\n");
printf("\t-l, --lowPPIB: any genes with an average PPIB value lower than the number specified here will not be included in the calculations\n\n");
printf("\t-o, --output: specify which folder the output results will be saved in - must already exist\n\n");
printf("\t-p, --plates: specify which specific plates will be included in the calculations. Format is: plate,plate,plate e.g. '-p 1,5,6' will calculate for only plates 1, 5, and 6.\n\n");
printf("\t-r, --results: specify the number of results to save for each gene\n\n");
printf("\t-t, --highPPIB: any genes with an average PPIB value higher than the number specified here will not be included in the calculations\n\n");
}
unsigned char isSelectedPlate(int plateNumber) {
for(int i = 0; i < numSelectedPlates; i++) {
if(selectedPlates[i] == plateNumber) {
return 1;
}
}
return 0;
}
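// createGeneListFromFile expects one CSV row per gene: a control flag ("FALSE" for
// genes to include), the plate number, the gene name, and then, for each of the three
// replicates, PROBES_PER_REPLICATE probe values followed by a PPIB value. Rows
// containing "=" or "#N/A", rows from unselected plates, and genes outside the PPIB
// thresholds are skipped.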
void createGeneListFromFile(FILE* file, double* geneList) {
char line[512];
int ppib;
//Eat the header line
fgets( line, sizeof(line), file);
while(fgets( line, sizeof(line), file ) != NULL)
{
char* tok;
int len = strlen(line);
//Reset PPIB value for each gene
ppib = 0;
if(len > 0 && line[len-1]=='\n'){
line[len-1] = '\0';
}
if(len > 2 && line[len-2]=='\r') {
line[len-2] = '\0';
}
tok = strtok(line,",");
//Check to ensure tok is not null and that it is not a control
if(tok && strcmp(tok, "FALSE") == 0) {
//Get the plate number (next token)
tok = strtok(NULL,",");
//check to ensure the plate number is wanted for this calculation
if(numSelectedPlates != -1 && !isSelectedPlate(atoi(tok))) {
goto out;
}
//Get the Gene Name (next token)
tok = strtok(NULL,",");
//Store the name of the gene in the nameList
strcpy(nameList[geneCount], tok);
//Eliminate unwanted characters (spaces, slashes)
trim(nameList[geneCount]);
//Populate distances into geneList
for(int i = 0; i < REPLICATES_PER_GENE; i++) {
for(int j = 0; j < PROBES_PER_REPLICATE; j++) {
tok = strtok(NULL,",");
if(strcmp(tok,"=") == 0 || strcmp(tok,"#N/A") == 0) {
//Break out of nested loops, if one replicate of a gene is invalid, the entire gene is invalid
//There is no need to parse any more of this line
goto out;
} else {
geneList[(geneCount * DISTANCES_PER_GENE) + (PROBES_PER_REPLICATE * i) + j] = atof(tok);
}
}
//Read the PPIB value, add to ppib total
ppib += atoi(strtok(NULL, ","));
}
//Calculate the average of the PPIBs for each replicate
ppib /= 3;
//Remove a gene that has a sub-threshold PPIB (first check to see that a threshold PPIB has been set)
if((lowPPIB != -1 && ppib < lowPPIB) || (highPPIB != -1 && ppib > highPPIB)) {
goto out;
}
geneCount++;
out:
continue;
}
}
}
| 18adbf276fbd1970d8a4b0afbc167e2943b515b6.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cuda.h"
#include <ctype.h>
#include "genedist.h"
#include "cuda_runtime.h"
char** nameList;
int geneCount = 0;
//Determines if the program will run for all genes or a single one
unsigned char singleGeneCalculation = 0;
//May be unable to use CUDA due to GPU memory unavailability
unsigned char canUseCuda;
//User provided parameter, number of results that will be saved for each gene
int numberOfResults = 10;
//User specified gene, set by command line argument (optional)
char selectedGene[MAX_GENE_NAME_SIZE] = "default";
//User specified plates, set by command line argument (optional)
int* selectedPlates = NULL;
int numSelectedPlates = -1;
//User specified threshold values, set by command line argument(optional)
//Default is -1 which indicates value was not set by user and no threshold
//PPIB will be used (all genes will be included in the calculations)
int highPPIB = -1;
int lowPPIB = -1;
//User specified output file save location
//DEFAULT: "output/"
char outputLocation[MAX_FILE_PATH_SIZE] = "output";
//User specified input file location
char inputLocation[MAX_FILE_PATH_SIZE] = "none";
//Texture memory will be used to store gene information
texture<int2, 1, cudaReadModeElementType> geneTex;
//User specified calculation variant
calculationVariant calcVariant = STANDARD;
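//Computes the full all-pairs distance matrix. Each block owns 32 genes and streams the whole gene
//list through shared memory in 32-gene tiles; gene data is fetched from texture memory as int2
//pairs and reassembled into doubles with __hiloint2double.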
__global__ void calculateDistanceGPU(double* distance_d, int geneCount, calculationVariant calcVariant) {
__shared__ double s_genes[32 * DISTANCES_PER_GENE];
__shared__ double results[1024];
double dist = 0.0;
int geneIndex = blockIdx.x * blockDim.x + threadIdx.x;
//Fill own gene (these memory accesses are not very much fun but can't be avoided if we use texture memory)
double curr_gene[DISTANCES_PER_GENE];
double avgReplicateOne[6];
if(geneIndex < geneCount) {
for(int i = 0; i < DISTANCES_PER_GENE; i++) {
int2 v = tex1Dfetch(geneTex, geneIndex * DISTANCES_PER_GENE + i);
curr_gene[i] = __hiloint2double(v.y, v.x);
}
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((curr_gene[0] + curr_gene[6] + curr_gene[12])/3);
avgReplicateOne[1] = ((curr_gene[1] + curr_gene[7] + curr_gene[13])/3);
avgReplicateOne[2] = ((curr_gene[2] + curr_gene[8] + curr_gene[14])/3);
avgReplicateOne[3] = ((curr_gene[3] + curr_gene[9] + curr_gene[15])/3);
avgReplicateOne[4] = ((curr_gene[4] + curr_gene[10] + curr_gene[16])/3);
avgReplicateOne[5] = ((curr_gene[5] + curr_gene[11] + curr_gene[17])/3);
}
int top = (geneCount % 32 == 0) ? geneCount/32 : geneCount/32 + 1;
for(int i = 0; i < top; i++) {
//Fill the shared input array collaboratively
for(int j = 0; j < DISTANCES_PER_GENE; j++) {
int2 v = tex1Dfetch(geneTex, (i * 32 * DISTANCES_PER_GENE) + (threadIdx.x * DISTANCES_PER_GENE) + j);
s_genes[threadIdx.x * DISTANCES_PER_GENE + j] = __hiloint2double(v.y, v.x);
}
for(int j = 0; j < 32; j++) {
int offset = DISTANCES_PER_GENE * j;
if(calcVariant == STANDARD) {
dist = __dsqrt_rz(pow(s_genes[0 + offset] - curr_gene[0],2) + pow(s_genes[1 + offset] - curr_gene[1],2) +
pow(s_genes[2 + offset] - curr_gene[2],2) + pow(s_genes[3 + offset] - curr_gene[3],2) +
pow(s_genes[4 + offset] - curr_gene[4],2) + pow(s_genes[5 + offset] - curr_gene[5],2)) +
__dsqrt_rz(pow(s_genes[6 + offset] - curr_gene[6],2) + pow(s_genes[7 + offset] - curr_gene[7],2) +
pow(s_genes[8 + offset] - curr_gene[8],2) + pow(s_genes[9 + offset] - curr_gene[9],2) +
pow(s_genes[10 + offset] - curr_gene[10],2) + pow(s_genes[11 + offset] - curr_gene[11],2))+
__dsqrt_rz(pow(s_genes[12 + offset] - curr_gene[12],2) + pow(s_genes[13 + offset] - curr_gene[13],2) +
pow(s_genes[14 + offset] - curr_gene[14],2) + pow(s_genes[15 + offset] - curr_gene[15],2) +
pow(s_genes[16 + offset] - curr_gene[16],2) + pow(s_genes[17 + offset] - curr_gene[17],2));
}
else if (calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = __dsqrt_rz(pow(s_genes[0 + offset] - avgReplicateOne[0],2) + pow(s_genes[1 + offset] - avgReplicateOne[1],2) +
pow(s_genes[2 + offset] - avgReplicateOne[2],2) + pow(s_genes[3 + offset] - avgReplicateOne[3],2) +
pow(s_genes[4 + offset] - avgReplicateOne[4],2) + pow(s_genes[5 + offset] - avgReplicateOne[5],2)) +
__dsqrt_rz(pow(s_genes[6 + offset] - avgReplicateOne[0],2) + pow(s_genes[7 + offset] - avgReplicateOne[1],2) +
pow(s_genes[8 + offset] - avgReplicateOne[2],2) + pow(s_genes[9 + offset] - avgReplicateOne[3],2) +
pow(s_genes[10 + offset] - avgReplicateOne[4],2) + pow(s_genes[11 + offset] - avgReplicateOne[5],2))+
__dsqrt_rz(pow(s_genes[12 + offset] - avgReplicateOne[0],2) + pow(s_genes[13 + offset] - avgReplicateOne[1],2) +
pow(s_genes[14 + offset] - avgReplicateOne[2],2) + pow(s_genes[15 + offset] - avgReplicateOne[3],2) +
pow(s_genes[16 + offset] - avgReplicateOne[4],2) + pow(s_genes[17 + offset] - avgReplicateOne[5],2));
}
results[(threadIdx.x * 32) + j] = dist;
}
for(int j = 0; j < 32; j++) {
int sharedoffset = j* 32;
int globaloffset = (blockIdx.x * 32 * geneCount) + (j * geneCount) + (32 * i);
if(threadIdx.x + globaloffset < geneCount * geneCount) {
distance_d[threadIdx.x + globaloffset ] = results[threadIdx.x + sharedoffset];
}
}
}
}
}
void parseArguments(int argc, char** argv) {
if(argc > 1) {
//Will need to loop through all command line arguments
for(int i = 1; i < argc; i+=2) {
if (strcmp(argv[i], "-i") == 0 || strcmp(argv[i], "--input") == 0) {
strcpy(inputLocation, argv[i+1]);
} else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
usage();
exit(0);
} else if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--calc") == 0) {
//Convert string to upper case
char str[MAX_FILE_PATH_SIZE] = "temp";
strcpy(str, argv[i+1]);
int i = 0;
while(i < strlen(str)) {
str[i] = (char)toupper(str[i]);
i++;
}
if(strcmp(str, "STANDARD") == 0) {
calcVariant = STANDARD;
} else if (strcmp(str, "AVERAGE_FIRST_DISTANCE") == 0) {
calcVariant = AVERAGE_FIRST_DISTANCE;
} else {
printf("Warning: No such calculation variant as %s. Please see usage below for a list of acceptable variants.\n", str);
usage();
exit(1);
}
} else if (strcmp(argv[i], "-r") == 0 || strcmp(argv[i], "--results") == 0) {
numberOfResults = atoi(argv[i+1]);
//atoi() returns 0 when no valid conversion takes place, need to make sure numberOfResults is at least 1
if(!(numberOfResults > 0)) {
printf("Error: number of results must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-g") == 0 || strcmp(argv[i], "--gene") == 0) {
strcpy(selectedGene,argv[i+1]);
singleGeneCalculation = 1;
} else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--plates") == 0) {
//Format will be numbers separated by commas (i.e. 123,233,11,22)
//First malloc some space in the selectedPlates array
//Because size is not know at compile time, make array as big as number of characters in the string
//This is obviously overkill but will always have enough room for any arbitrary number of plates
selectedPlates = (int*) malloc(strlen(argv[i+1]) * sizeof(int));
//If there is only one plate selected, don't need to tokenize the string
if(strlen(argv[i+1]) == 1) {
numSelectedPlates = 1;
selectedPlates[0] = atoi(argv[i+1]);
} else {
char* tok = strtok(argv[i+1], ",");
numSelectedPlates = 0;
while(tok != NULL) {
selectedPlates[numSelectedPlates] = atoi(tok);
tok = strtok(NULL, ",");
numSelectedPlates++;
}
}
} else if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--highPPIB") == 0) {
highPPIB = atoi(argv[i+1]);
//atoi() returns 0 when no valid conversion takes place, need to make sure threshold PPIB is at least 1
if(!(highPPIB > 0)) {
printf("Error: High threshold PPIB must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-l") == 0 || strcmp(argv[i], "--lowPPIB") == 0) {
lowPPIB = atoi(argv[i+1]);
//atoi() returns 0 when no valid conversion takes place, need to make sure threshold PPIB is at least 1
if(!(lowPPIB > 0)) {
printf("Error: Low threshold PPIB must be at least 1.\n");
exit(1);
}
} else if (strcmp(argv[i], "-o") == 0 || strcmp(argv[i], "--output") == 0 ){
strcpy(outputLocation, argv[i+1]);
/*********************************************************
*
* Not sure if this is necessary, simply adds a trailing '/'
* Maybe different for windows vs. posix machines as well
*
**********************************************************/
//if(outputLocation[strlen(outputLocation) - 1] != '/') {
// outputLocation[strlen(outputLocation)] = '/';
//}
} else {
printf("Warning: %s is an invalid command line argument\n. See below for usage.\n\n", argv[i]);
usage();
exit(1);
}
}
} else {
printf("No command line arguments specified. Please see usage below.\n\n");
usage();
exit(1);
}
}
int main(int argc, char** argv) {
double* geneList_h, *geneList_d;
double* distance_d, *distance_h;
//Only holds data if calculating all results and GPU is unavailable
DistanceTuple** distanceMatrix = NULL;
distance_h = NULL;
parseArguments(argc, argv);
if(strcmp(inputLocation, "none") == 0) {
printf("Warning: must provide an input file.\n\n");
usage();
exit(1);
}
FILE* inputFile;
if(!(inputFile = fopen(inputLocation, "r"))) {
//File does not exist
printf("Error: unable to open input file %s\n", inputLocation);
exit(1);
}
//Allocate memory for the gene list on the host
geneList_h = (double *) malloc(MAX_GENES * sizeof(double) * DISTANCES_PER_GENE);
//Allocate memory for the name list on the host
nameList = (char **) malloc(MAX_GENES * sizeof(char*));
for(int i = 0; i < MAX_GENES; i++) {
nameList[i] = (char *) malloc(MAX_GENE_NAME_SIZE * sizeof(char));
}
//File exists so continue
createGeneListFromFile(inputFile, geneList_h);
//Close input file
fclose(inputFile);
printf("Read file successfully.\n");
if(geneCount == 1) {
printf("Only one gene found that meets specified criteria. Please provide new criteria to expand number of eligible genes.\n");
exit(0);
}
if(geneCount < 11) {
numberOfResults = geneCount - 2;
}
if(numberOfResults > geneCount - 1) {
printf("Error: number of results requested exceeds maximum allowable number.\n");
printf("Number of genes: %d, maximum number of results per gene: %d\n", geneCount, geneCount-1);
printf("Number of results will be set to maximum. All results will be saved.\n");
numberOfResults = geneCount - 1;
}
int geneListSize = geneCount * DISTANCES_PER_GENE * sizeof(double);
printf("Number of genes: %d\n", geneCount);
if(!singleGeneCalculation) {
//Get memory specifications of the GPU
getCardSpecs();
}
if(geneCount < CUDA_CUTOFF) {
canUseCuda = 0;
}
//Launch the CUDA portion of the code if calculating distances for all genes
printf("CUDA status (0 - not using, 1 - using): %d\n", canUseCuda);
if(!canUseCuda) {
printf("Program will continue in serial mode...\n");
}
if(!singleGeneCalculation && canUseCuda) {
cudaError_t error;
//There will be n^2 results from n genes
long long resultsSize = geneCount * geneCount * sizeof(double);
dim3 blockSize = 32;
dim3 gridSize = 0;
gridSize = (geneCount % 32 == 0) ? geneCount/32 : geneCount/32 + 1;
error = cudaMalloc((void**) &distance_d, resultsSize);
checkCudaError(error, "Malloc for device side distance array");
//Allocate space on the host for the distance results
distance_h = (double*) malloc(resultsSize);
//Allocate memory on the device for the genelist
error = cudaMalloc((void**) &geneList_d, geneListSize);
checkCudaError(error, "Malloc for device side gene list array");
//Copy the gene list to the device
error = cudaMemcpy(geneList_d, geneList_h, geneListSize, cudaMemcpyHostToDevice);
checkCudaError(error, "Copy genelist from host to device");
//Bind geneList to texture memory
error = cudaBindTexture(NULL, geneTex, geneList_d, geneListSize);
checkCudaError(error, "Bind genelist to texture memory");
calculateDistanceGPU<<<gridSize,blockSize>>>(distance_d, geneCount, calcVariant);
error = cudaGetLastError();
//If there is an error in the kernel, exit the program because results will be invalid
if(error) {
printf("Progrm experienced error in kernel call and will now exit.\n");
exit(1);
}
//Get results back from the kernel
error = cudaMemcpy(distance_h, distance_d, resultsSize, cudaMemcpyDeviceToHost);
checkCudaError(error, "Copy results from Device to Host");
error = cudaUnbindTexture(geneTex);
checkCudaError(error, "Unbind Texture");
error = cudaFree(geneList_d);
checkCudaError(error, "Free genelist");
error = cudaFree(distance_d);
checkCudaError(error, "Free distance_d");
error = cudaDeviceReset();
checkCudaError(error, "Device Reset");
} else if(!singleGeneCalculation && !canUseCuda) {
//Allocate memory for the distance matrix
distanceMatrix = (DistanceTuple**) malloc(geneCount * sizeof(DistanceTuple*));
for(int i = geneCount - 1; i >= 0; i--) {
distanceMatrix[geneCount - i - 1] = (DistanceTuple*) malloc(i * sizeof(DistanceTuple));
}
calculateDistanceCPU(geneList_h, distanceMatrix);
} else if (singleGeneCalculation){
distance_h = (double *) malloc(geneCount * sizeof(double));
calculateSingleDistance(selectedGene, geneList_h, distance_h);
}
sortAndPrint(geneList_h, distance_h, distanceMatrix);
/**************
| CLEANUP
**************/
free(geneList_h);
if((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) {
free(distance_h);
}
if(!singleGeneCalculation && !canUseCuda) {
for(int i = 0; i < geneCount - 1; i++) {
free(distanceMatrix[i]);
}
free(distanceMatrix);
}
printf("Done! Results have been stored in the \"%s\" folder.\n", outputLocation);
return 0;
}
bool checkCudaError(cudaError_t error, char* operation) {
if(strcmp(cudaGetErrorString(error), "no error") != 0) {
printf("Warning: the following CUDA error occurred: %s\n", cudaGetErrorString(error));
printf("This error occurred during the following operation: %s\n\n", operation);
return 1;
}
return 0;
}
void getCardSpecs() {
int devCount;
cudaGetDeviceCount(&devCount);
size_t freemem, totmem;
cudaMemGetInfo(&freemem, &totmem);
printf("Total mem: %d\n", totmem);
printf("Free mem: %d\n", freemem);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
long long geneListSize = DISTANCES_PER_GENE * REPLICATES_PER_GENE * geneCount * sizeof(double);
long long resultsSize = geneCount * geneCount * sizeof(double);
if(props.totalGlobalMem == 0) {
printf("Warning: No CUDA card detected.\n");
canUseCuda = 0;
return;
} else if(props.totalGlobalMem < geneListSize + resultsSize) {
printf("Warning: CUDA card has insufficient total memory to run for this input size.\n");
printf("Global memory (MB): %d\n", props.totalGlobalMem/(1024 * 1024));
canUseCuda = 0;
return;
} else if(props.major < 2) {
printf("Warning: CUDA card compute capability is too low. Expected compute capability is 2.0 or greater. This card has a compute capability of %d.%d\n", props.major, props.minor);
canUseCuda = 0;
return;
}
canUseCuda = 1;
}
void sortAndPrint(double* geneList, double* distance, DistanceTuple** distanceMatrix) {
//Need to reconstruct a templist from the distance matrix for each gene
DistanceTuple* tempDistanceList = (DistanceTuple*) malloc(geneCount * sizeof(DistanceTuple));
int top = (!singleGeneCalculation) ? geneCount : 1;
for(int i = 0; i < top; i++) {
if((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) {
for(int j = 0; j < geneCount; j++) {
tempDistanceList[j].distance = distance[(i*geneCount) + j];
tempDistanceList[j].geneOne = j;
}
} else if (!singleGeneCalculation && !canUseCuda) {
int row, col, distanceIndex;
distanceIndex = 0;
row = i - 1;
col = 0;
//Load entries from the diagonal
while(row >= 0 && col <= i - 1) {
tempDistanceList[distanceIndex] = distanceMatrix[row][col];
distanceIndex++;
row--;
col++;
}
//Load remaining entries from the gene's column
for(int j = 0; j < geneCount - i - 1; j++) {
tempDistanceList[distanceIndex] = distanceMatrix[i][j];
tempDistanceList[distanceIndex].geneOne = tempDistanceList[distanceIndex].geneTwo;
distanceIndex++;
}
}
int listSize = ( (!singleGeneCalculation && canUseCuda)|| singleGeneCalculation) ? geneCount : geneCount - 1;
qsort (tempDistanceList, listSize, sizeof(DistanceTuple), compareDistanceTuple);
char fname[MAX_FILE_PATH_SIZE + MAX_GENE_NAME_SIZE];
strcpy(fname, outputLocation);
if(singleGeneCalculation) {
strcat(fname, selectedGene);
} else {
strcat(fname, nameList[i]);
}
strcat(fname, ".txt");
//Write results to file
FILE * outfile = fopen(fname, "w");
if(!outfile) {
printf("Warning: Output folder does not exist. Please create the output folder before running the program.\n");
exit(1);
}
int startIndex = ((!singleGeneCalculation && canUseCuda) || singleGeneCalculation) ? 1 : 0;
for(int j = startIndex; j < numberOfResults + startIndex; j++) {
if(strcmp(nameList[tempDistanceList[j].geneOne], nameList[i]) != 0) {
fprintf(outfile, "%d: %s %.15f\n", startIndex ? j : j+1, nameList[tempDistanceList[j].geneOne], tempDistanceList[j].distance);
}
}
fclose(outfile);
}
}
void calculateSingleDistance(char* gene, double* geneList, double* distanceList) {
bool foundGene = false;
int currGeneIndex;
for(int i = 0; i < geneCount; i++) {
if(strcmp(nameList[i],gene) == 0) {
foundGene = true;
currGeneIndex = i;
printf("Found gene %s, now calculating distances.\n", gene);
break;
}
}
if(!foundGene) {
printf("Error: unable to find specified gene: %s. \n", gene);
exit(1);
}
double dist = 0.0;
int currOffset = currGeneIndex * DISTANCES_PER_GENE;
double avgReplicateOne[6];
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((geneList[0 + currOffset] + geneList[6 + currOffset] + geneList[12 + currOffset])/3);
avgReplicateOne[1] = ((geneList[1 + currOffset] + geneList[7 + currOffset] + geneList[13 + currOffset])/3);
avgReplicateOne[2] = ((geneList[2 + currOffset] + geneList[8 + currOffset] + geneList[14 + currOffset])/3);
avgReplicateOne[3] = ((geneList[3 + currOffset] + geneList[9 + currOffset] + geneList[15 + currOffset])/3);
avgReplicateOne[4] = ((geneList[4 + currOffset] + geneList[10 + currOffset] + geneList[16 + currOffset])/3);
avgReplicateOne[5] = ((geneList[5 + currOffset] + geneList[11 + currOffset] + geneList[17 + currOffset])/3);
}
for(int i = 0; i < geneCount; i++) {
int tmpOffset = i * DISTANCES_PER_GENE;
if(calcVariant == STANDARD) {
dist = sqrt(pow(geneList[0 + tmpOffset] - geneList[0 + currOffset],2) + pow(geneList[1 + tmpOffset] - geneList[1 + currOffset],2) +
pow(geneList[2 + tmpOffset] - geneList[2 + currOffset],2) + pow(geneList[3 + tmpOffset] - geneList[3 + currOffset],2) +
pow(geneList[4 + tmpOffset] - geneList[4 + currOffset],2) + pow(geneList[5 + tmpOffset] - geneList[5 + currOffset],2))
+ sqrt(pow(geneList[6 + tmpOffset] - geneList[6 + currOffset],2) + pow(geneList[7 + tmpOffset] - geneList[7 + currOffset],2) +
pow(geneList[8 + tmpOffset] - geneList[8 + currOffset],2) + pow(geneList[9 + tmpOffset] - geneList[9 + currOffset],2) +
pow(geneList[10 + tmpOffset] - geneList[10 + currOffset],2) + pow(geneList[11 + tmpOffset] - geneList[11 + currOffset],2))
+ sqrt(pow(geneList[12 + tmpOffset] - geneList[12 + currOffset],2) + pow(geneList[13 + tmpOffset] - geneList[13 + currOffset],2) +
pow(geneList[14 + tmpOffset] - geneList[14 + currOffset],2) + pow(geneList[15 + tmpOffset] - geneList[15 + currOffset],2) +
pow(geneList[16 + tmpOffset] - geneList[16 + currOffset],2) + pow(geneList[17 + tmpOffset] - geneList[17 + currOffset],2));
} else if(calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = sqrt(pow(geneList[0 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[1 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[2 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[3 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[4 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[5 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[6 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[7 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[8 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[9 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[10 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[11 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[12 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[13 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[14 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[15 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[16 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[17 + tmpOffset] - avgReplicateOne[5],2));
}
distanceList[i] = dist;
}
}
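//Removes every space and '/' character from str, in place.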
void trim(char *str) {
char *p1 = str, *p2 = str;
do
while (*p2 == ' ' || *p2 == '/')
p2++;
while (*p1++ = *p2++);
}
void calculateDistanceCPU(double* geneList, DistanceTuple** distanceMatrix) {
double dist = 0.0;
int distColIndex, currOffset, tmpOffset, currGeneIndex;
for(int i = 0; i < geneCount; i++) {
currGeneIndex = i;
currOffset = currGeneIndex * DISTANCES_PER_GENE;
double avgReplicateOne[6];
if(calcVariant == AVERAGE_FIRST_DISTANCE) {
avgReplicateOne[0] = ((geneList[0 + currOffset] + geneList[6 + currOffset] + geneList[12 + currOffset])/3);
avgReplicateOne[1] = ((geneList[1 + currOffset] + geneList[7 + currOffset] + geneList[13 + currOffset])/3);
avgReplicateOne[2] = ((geneList[2 + currOffset] + geneList[8 + currOffset] + geneList[14 + currOffset])/3);
avgReplicateOne[3] = ((geneList[3 + currOffset] + geneList[9 + currOffset] + geneList[15 + currOffset])/3);
avgReplicateOne[4] = ((geneList[4 + currOffset] + geneList[10 + currOffset] + geneList[16 + currOffset])/3);
avgReplicateOne[5] = ((geneList[5 + currOffset] + geneList[11 + currOffset] + geneList[17 + currOffset])/3);
}
distColIndex = 0;
for(int j = i+1; j < geneCount; j++) {
tmpOffset = j * DISTANCES_PER_GENE;
if(calcVariant == STANDARD) {
dist = sqrt(pow(geneList[0 + tmpOffset] - geneList[0 + currOffset],2) + pow(geneList[1 + tmpOffset] - geneList[1 + currOffset],2) +
pow(geneList[2 + tmpOffset] - geneList[2 + currOffset],2) + pow(geneList[3 + tmpOffset] - geneList[3 + currOffset],2) +
pow(geneList[4 + tmpOffset] - geneList[4 + currOffset],2) + pow(geneList[5 + tmpOffset] - geneList[5 + currOffset],2))
+ sqrt(pow(geneList[6 + tmpOffset] - geneList[6 + currOffset],2) + pow(geneList[7 + tmpOffset] - geneList[7 + currOffset],2) +
pow(geneList[8 + tmpOffset] - geneList[8 + currOffset],2) + pow(geneList[9 + tmpOffset] - geneList[9 + currOffset],2) +
pow(geneList[10 + tmpOffset] - geneList[10 + currOffset],2) + pow(geneList[11 + tmpOffset] - geneList[11 + currOffset],2))
+ sqrt(pow(geneList[12 + tmpOffset] - geneList[12 + currOffset],2) + pow(geneList[13 + tmpOffset] - geneList[13 + currOffset],2) +
pow(geneList[14 + tmpOffset] - geneList[14 + currOffset],2) + pow(geneList[15 + tmpOffset] - geneList[15 + currOffset],2) +
pow(geneList[16 + tmpOffset] - geneList[16 + currOffset],2) + pow(geneList[17 + tmpOffset] - geneList[17 + currOffset],2));
} else if (calcVariant == AVERAGE_FIRST_DISTANCE) {
dist = sqrt(pow(geneList[0 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[1 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[2 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[3 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[4 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[5 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[6 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[7 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[8 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[9 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[10 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[11 + tmpOffset] - avgReplicateOne[5],2))
+ sqrt(pow(geneList[12 + tmpOffset] - avgReplicateOne[0],2) + pow(geneList[13 + tmpOffset] - avgReplicateOne[1],2) +
pow(geneList[14 + tmpOffset] - avgReplicateOne[2],2) + pow(geneList[15 + tmpOffset] - avgReplicateOne[3],2) +
pow(geneList[16 + tmpOffset] - avgReplicateOne[4],2) + pow(geneList[17 + tmpOffset] - avgReplicateOne[5],2));
}
distanceMatrix[i][distColIndex].geneOne = i;
distanceMatrix[i][distColIndex].geneTwo = j;
distanceMatrix[i][distColIndex].distance = dist;
distColIndex++;
}
}
}
int compareDistanceTuple (const void * a, const void * b) {
DistanceTuple *ia = (DistanceTuple*)a;
DistanceTuple *ib = (DistanceTuple*)b;
double diff = ia->distance - ib->distance;
if(diff < 0) {
return -1;
} else if (diff == 0.0f) {
return 0;
} else {
return 1;
}
}
void usage(void) {
printf("User MUST provide an input file path\n\n");
printf("\t-c, --calc: specify the calculation variant to be run. Valid options are as follows:\n");
printf("\t\tSTANDARD - dist(r1,s1) + dist(r2,s2) + dist(r3,s3)\n");
printf("\t\tAVERAGE_FIRST_DISTANCE - dist(avg(r1,r2,r3), s1) + dist(avg(r1,r2,r3), s2) + dist(avg(r1,r2,r3), s3)\n\n");
printf("\t-g, --gene: specify a specific gene to generate results for\n\n");
printf("\t-h, --help: displays this usage message\n\n");
printf("\t-i, --input: specify the input file path\n\n");
printf("\t-l, --lowPPIB: any genes with an average PPIB value lower than the number specified here will not be included in the calculations\n\n");
printf("\t-o, --output: specify which folder the output results will be saved in - must already exist\n\n");
printf("\t-p, --plates: specify which specific plates will be included in the calculations. Format is: plate,plate,plate e.g. '-p 1,5,6' will calculate for only plates 1, 5, and 6.\n\n");
printf("\t-r, --results: specify the number of results to save for each gene\n\n");
printf("\t-t, --highPPIB: any genes with an average PPIB value higher than the number specified here will not be included in the calculations\n\n");
}
unsigned char isSelectedPlate(int plateNumber) {
for(int i = 0; i < numSelectedPlates; i++) {
if(selectedPlates[i] == plateNumber) {
return 1;
}
}
return 0;
}
void createGeneListFromFile(FILE* file, double* geneList) {
char line[512];
int ppib;
//Eat the header line
fgets( line, sizeof(line), file);
while(fgets( line, sizeof(line), file ) != NULL)
{
char* tok;
int len = strlen(line);
//Reset PPIB value for each gene
ppib = 0;
if(len > 0 && line[len-1]=='\n'){
line[len-1] = '\0';
}
if(len > 2 && line[len-2]=='\r') {
line[len-2] = '\0';
}
tok = strtok(line,",");
//Check to ensure tok is not null and that it is not a control
if(tok && strcmp(tok, "FALSE") == 0) {
//Get the plate number (next token)
tok = strtok(NULL,",");
//check to ensure the plate number is wanted for this calculation
if(numSelectedPlates != -1 && !isSelectedPlate(atoi(tok))) {
goto out;
}
//Get the Gene Name (next token)
tok = strtok(NULL,",");
//Store the name of the gene in the nameList
strcpy(nameList[geneCount], tok);
//Eliminate unwanted characters (spaces, slashes)
trim(nameList[geneCount]);
//Populate distances into geneList
for(int i = 0; i < REPLICATES_PER_GENE; i++) {
for(int j = 0; j < PROBES_PER_REPLICATE; j++) {
tok = strtok(NULL,",");
if(strcmp(tok,"=") == 0 || strcmp(tok,"#N/A") == 0) {
//Break out of nested loops, if one replicate of a gene is invalid, the entire gene is invalid
//There is no need to parse any more of this line
goto out;
} else {
geneList[(geneCount * DISTANCES_PER_GENE) + (PROBES_PER_REPLICATE * i) + j] = atof(tok);
}
}
//Read the PPIB value, add to ppib total
ppib += atoi(strtok(NULL, ","));
}
//Calculate the average of the PPIBs for each replicate
ppib /= 3;
//Remove a gene that has a sub-threshold PPIB (first check to see that a threshold PPIB has been set)
if((lowPPIB != -1 && ppib < lowPPIB) || (highPPIB != -1 && ppib > highPPIB)) {
goto out;
}
geneCount++;
out:
continue;
}
}
}
|
7b00ce74ec113d742d29b0d6247afaae1af83975.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file SigmoidLayer2_device.cu
* @date 2017-02-07
* @author moonhoen lee
* @brief
* @details
*/
#include "hip/hip_runtime.h"
#include "Sigmoid2Layer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "PropMgmt.h"
#define SIGMOID2LAYER_LOG 1
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
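// Forward: y = 1 / (1 + exp(-x)); Backward: dL/dx = dL/dy * y * (1 - y)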
template <typename Dtype>
__global__ void Forward(const Dtype *input, int size, Dtype *output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
output[idx] = 1.0 / (1.0 + exp((-1.0) * input[idx]));
}
template <typename Dtype>
__global__ void Backward(const Dtype *outputGrad, const Dtype *output, int size,
Dtype *inputGrad)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
inputGrad[idx] = outputGrad[idx] * output[idx] * (1.0 - output[idx]);
}
template <typename Dtype>
Sigmoid2Layer<Dtype>::Sigmoid2Layer() : Layer<Dtype>() {
this->type = Layer<Dtype>::Sigmoid2;
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::feedforward() {
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
int size = this->_inputData[0]->getCountByAxis(0);
hipLaunchKernelGGL(( Forward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, size, outputData);
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::backpropagation() {
const Dtype* outputGrads = this->_outputData[0]->device_grad();
const Dtype* output = this->_outputData[0]->device_data();
Dtype* inputGrads = this->_inputData[0]->mutable_device_grad();
int size = this->_inputData[0]->getCountByAxis(0);
hipLaunchKernelGGL(( Backward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
outputGrads, output, size, inputGrads);
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
assert(count == inputDataCount);
}
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
// XXX: currently only the FC case has been considered
// TODO: an implementation for Conv layers is still needed
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
this->_inputShape[0] = {batches, channels, rows, cols};
this->_outputData[0]->reshape({batches, channels, rows, cols});
STDOUT_COND_LOG(SIGMOID2LAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
STDOUT_COND_LOG(SIGMOID2LAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* Sigmoid2Layer<Dtype>::initLayer() {
Sigmoid2Layer* layer = new Sigmoid2Layer<Dtype>();
return (void*)layer;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::destroyLayer(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
delete layer;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
SASSERT0(index == 0);
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == 0);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == 0);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool Sigmoid2Layer<Dtype>::allocLayerTensors(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::backwardTensor(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template class Sigmoid2Layer<float>;
| 7b00ce74ec113d742d29b0d6247afaae1af83975.cu | /**
* @file SigmoidLayer2_device.cu
* @date 2017-02-07
* @author moonhoen lee
* @brief
* @details
*/
#include "cuda_runtime.h"
#include "Sigmoid2Layer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "PropMgmt.h"
#define SIGMOID2LAYER_LOG 1
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
template <typename Dtype>
__global__ void Forward(const Dtype *input, int size, Dtype *output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
output[idx] = 1.0 / (1.0 + exp((-1.0) * input[idx]));
}
template <typename Dtype>
__global__ void Backward(const Dtype *outputGrad, const Dtype *output, int size,
Dtype *inputGrad)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
inputGrad[idx] = outputGrad[idx] * output[idx] * (1.0 - output[idx]);
}
template <typename Dtype>
Sigmoid2Layer<Dtype>::Sigmoid2Layer() : Layer<Dtype>() {
this->type = Layer<Dtype>::Sigmoid2;
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::feedforward() {
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
int size = this->_inputData[0]->getCountByAxis(0);
Forward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
inputData, size, outputData);
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::backpropagation() {
const Dtype* outputGrads = this->_outputData[0]->device_grad();
const Dtype* output = this->_outputData[0]->device_data();
Dtype* inputGrads = this->_inputData[0]->mutable_device_grad();
int size = this->_inputData[0]->getCountByAxis(0);
Backward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
outputGrads, output, size, inputGrads);
}
template <typename Dtype>
void Sigmoid2Layer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
assert(count == inputDataCount);
}
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
// XXX: currently only the FC case has been considered
// TODO: an implementation for Conv layers is still needed
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
this->_inputShape[0] = {batches, channels, rows, cols};
this->_outputData[0]->reshape({batches, channels, rows, cols});
STDOUT_COND_LOG(SIGMOID2LAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
STDOUT_COND_LOG(SIGMOID2LAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* Sigmoid2Layer<Dtype>::initLayer() {
Sigmoid2Layer* layer = new Sigmoid2Layer<Dtype>();
return (void*)layer;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::destroyLayer(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
delete layer;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
SASSERT0(index == 0);
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == 0);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == 0);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool Sigmoid2Layer<Dtype>::allocLayerTensors(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::backwardTensor(void* instancePtr) {
Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void Sigmoid2Layer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template class Sigmoid2Layer<float>;
|
2000446264db89760502d929145e85a645a5586a.hip | // !!! This is a file automatically generated by hipify!!!
#include <nervCUDA.h>
#include <hip/hip_runtime.h>
#include <nerv_kernels.h>
#ifdef BLOCK_SIZE
#undef BLOCK_SIZE
#endif
#define BLOCK_SIZE 1024
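// Produces a randomly thinned copy of the weights: entry id is kept (taken from traits.values, or
// set to the scalar traits.value) when a uniform draw is <= traits.threshold, and zeroed otherwise.
// In debug mode the draw is replaced by |sin(id)| so results are reproducible.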
template<typename T, bool debugMode, unsigned int blockSize = 32>
__global__ void RandWeights(RandDeviceTraits<T> traits)
{
unsigned int id = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if (id < traits.size)
{
float val;
if (debugMode)
{
val = (float)abs(sin((T)id));
}
else
{
// Compute the index to retrieve the rand state:
int rid = blockSize * threadIdx.y + threadIdx.x;
hiprandState_t rState = traits.randStates[rid];
val = hiprand_uniform(&rState);
traits.randStates[rid] = rState;
}
if (traits.values)
{
traits.target[id] = val <= traits.threshold ? traits.values[id] : 0.0;
}
else
{
traits.target[id] = val <= traits.threshold ? traits.value : 0.0;
}
}
}
template<typename T>
void rand_weights_device(RandDeviceTraits<T> &traits)
{
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((BLOCK_SIZE + traits.size - 1) / BLOCK_SIZE, 1, 1);
if (traits.debug)
{
hipLaunchKernelGGL(( RandWeights<T, true>) , dim3(dimGrid), dim3(dimBlock), 0, 0, traits);
}
else
{
hipLaunchKernelGGL(( RandWeights<T, false>) , dim3(dimGrid), dim3(dimBlock), 0, 0, traits);
}
// CHECK_KERNEL();
}
template <typename T>
void _rand_weights(RandTraits<T> &traits)
{
RandDeviceTraits<T> d_traits;
d_traits = traits;
// Now call the device kernel:
rand_weights_device(d_traits);
// copy the results back:
copyFromDevice(traits.target, d_traits.target, traits.size);
}
extern "C" {
void rand_weights(RandTraits<double> &traits)
{
_rand_weights(traits);
}
void rand_weights_f(RandTraits<float> &traits)
{
_rand_weights(traits);
}
}
| 2000446264db89760502d929145e85a645a5586a.cu | #include <nervCUDA.h>
#include <cuda_runtime.h>
#include <nerv_kernels.h>
#ifdef BLOCK_SIZE
#undef BLOCK_SIZE
#endif
#define BLOCK_SIZE 1024
template<typename T, bool debugMode, unsigned int blockSize = 32>
__global__ void RandWeights(RandDeviceTraits<T> traits)
{
unsigned int id = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if (id < traits.size)
{
float val;
if (debugMode)
{
val = (float)abs(sin((T)id));
}
else
{
// Compute the index to retrieve the rand state:
int rid = blockSize * threadIdx.y + threadIdx.x;
curandState rState = traits.randStates[rid];
val = curand_uniform(&rState);
traits.randStates[rid] = rState;
}
if (traits.values)
{
traits.target[id] = val <= traits.threshold ? traits.values[id] : 0.0;
}
else
{
traits.target[id] = val <= traits.threshold ? traits.value : 0.0;
}
}
}
template<typename T>
void rand_weights_device(RandDeviceTraits<T> &traits)
{
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((BLOCK_SIZE + traits.size - 1) / BLOCK_SIZE, 1, 1);
if (traits.debug)
{
RandWeights<T, true> <<< dimGrid, dimBlock>>>(traits);
}
else
{
RandWeights<T, false> <<< dimGrid, dimBlock>>>(traits);
}
// CHECK_KERNEL();
}
template <typename T>
void _rand_weights(RandTraits<T> &traits)
{
RandDeviceTraits<T> d_traits;
d_traits = traits;
// Now call the device kernel:
rand_weights_device(d_traits);
// copy the results back:
copyFromDevice(traits.target, d_traits.target, traits.size);
}
extern "C" {
void rand_weights(RandTraits<double> &traits)
{
_rand_weights(traits);
}
void rand_weights_f(RandTraits<float> &traits)
{
_rand_weights(traits);
}
}
|
2954ed8dede6579875487cb17485312f1fdcbe38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUB_HALF_OPTIMIZATION 1
#include <benchmark/benchmark.h>
#include <type_traits>
#include <utility>
#include "init/init.hpp"
#include "unsafe_reduction/args.hpp"
#include "utils/utils.hpp"
#include "kernel_hip.cuh"
using namespace wmma_unsafe_reduction;
enum class block_synchronization_stategy : int { synchronize_threads, atomic_ballot };
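// Benchmark harness for a WMMA (tensor-core) based reduction: each kernel reduces num_segments
// segments of SEGMENT_SIZE half-precision elements into d_out via atomic accumulation; the enum
// selects between the __syncthreads-based and the atomic-ballot-based block synchronization
// variants defined in kernel_hip.cuh.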
template <size_t SEGMENT_SIZE,
int WARPS_PER_BLOCK,
block_synchronization_stategy sync_stategy>
void tryCUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC(benchmark::State &state) {
const size_t num_elements = state.range(0);
if (num_elements % SEGMENT_SIZE) {
state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE");
return;
}
size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE;
const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;
half *d_in_fp16 = nullptr;
half *d_out = nullptr;
dim3 gridDim, blockDim;
blockDim.x = BLOCK_DIM;
gridDim.x = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
state.SkipWithError(
fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
.c_str());
return;
}
PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
PRINT_IF_ERROR(hipMalloc(&d_out, 2 * sizeof(half)));
PRINT_IF_ERROR(hipMemset(d_out, 0, 2 * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
defer(hipEventDestroy(start));
defer(hipEventDestroy(stop));
try {
for (auto _ : state) {
PRINT_IF_ERROR(hipMemset(d_out, 0, 2 * sizeof(half)));
PRINT_IF_ERROR(hipEventRecord(start));
if (sync_stategy == block_synchronization_stategy::synchronize_threads) {
hipLaunchKernelGGL(( compute_wmma_reduction_atomic_w_syncthreads<SEGMENT_SIZE,
WARPS_PER_BLOCK,
BLOCK_DIM>)
, dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_out, num_segments);
} else if (sync_stategy == block_synchronization_stategy::atomic_ballot) {
hipLaunchKernelGGL(( compute_wmma_reduction_atomic_w_atomicballot<SEGMENT_SIZE,
WARPS_PER_BLOCK,
BLOCK_DIM>)
, dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_out, num_segments);
}
PRINT_IF_ERROR(hipEventRecord(stop));
PRINT_IF_ERROR(hipEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"num_elements", num_elements},
{"num_segments", num_segments},
{"segment_size", SEGMENT_SIZE},
{"warps_per_block", WARPS_PER_BLOCK},
{"flops",
{state.iterations() * 1.0 * num_elements,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
half h_out;
PRINT_IF_ERROR(
hipMemcpy(&h_out, d_out, sizeof(half), hipMemcpyDeviceToHost));
int errors = 0;
float correct_sum = 0;
for (int i = 0; i < num_elements; i++) {
correct_sum += h_in[i];
}
if (fabs(half_to_float(h_out) - correct_sum) > 0.1) {
errors++;
if (errors < 10) {
printf("Expected Reuction = %f, got h_out_buf = %f\n", correct_sum,
half_to_float(h_out));
}
}
if (errors > 0) {
printf(
"CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC does not agree with SEQUENTIAL! %d errors!\n",
errors);
}
#endif
hipFree(d_in_fp16);
hipFree(d_out);
} catch (...) {
hipFree(d_in_fp16);
hipFree(d_out);
hipDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
template <size_t SEGMENT_SIZE,
int WARPS_PER_BLOCK,
block_synchronization_stategy sync_stategy>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC(benchmark::State &state) {
hipDeviceReset();
try {
tryCUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<SEGMENT_SIZE, WARPS_PER_BLOCK, sync_stategy>(
state);
} catch (const std::exception &e) {
state.SkipWithError(e.what());
} catch (const std::string &e) {
state.SkipWithError(e.c_str());
} catch (...) {
state.SkipWithError("unknown exception");
}
}
template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_BLOCK_SYNC(benchmark::State &state) {
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<
SEGMENT_SIZE,
WARPS_PER_BLOCK,
block_synchronization_stategy::synchronize_threads>(state);
}
template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_ATOMIC_BALLOT(benchmark::State &state) {
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<SEGMENT_SIZE,
WARPS_PER_BLOCK,
block_synchronization_stategy::atomic_ballot>(
state);
}
#define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK) \
BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_ATOMIC_BALLOT, \
SEGMENT_SIZE, \
WARPS_PER_BLOCK) \
->ARGS() \
->UseManualTime(); \
BENCHMARK_TEMPLATE( \
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_BLOCK_SYNC, SEGMENT_SIZE, WARPS_PER_BLOCK) \
->ARGS() \
->UseManualTime()
#define BENCHMARK_REDUCTION(SEGMENT_SIZE) \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16)
BENCHMARK_REDUCTION(256);
BENCHMARK_REDUCTION(2 * 256);
BENCHMARK_REDUCTION(4 * 256);
BENCHMARK_REDUCTION(8 * 256);
BENCHMARK_REDUCTION(16 * 256);
// BENCHMARK_REDUCTION(32 * 256);
// BENCHMARK_REDUCTION(64 * 256);
// BENCHMARK_REDUCTION(128 * 256);
// BENCHMARK_REDUCTION(256 * 256);
// BENCHMARK_REDUCTION(512 * 256);
// BENCHMARK_REDUCTION(1024 * 256);
| 2954ed8dede6579875487cb17485312f1fdcbe38.cu | #define CUB_HALF_OPTIMIZATION 1
#include <benchmark/benchmark.h>
#include <type_traits>
#include <utility>
#include "init/init.hpp"
#include "unsafe_reduction/args.hpp"
#include "utils/utils.hpp"
#include "kernel.cuh"
using namespace wmma_unsafe_reduction;
enum class block_synchronization_stategy : int { synchronize_threads, atomic_ballot };
template <size_t SEGMENT_SIZE,
int WARPS_PER_BLOCK,
block_synchronization_stategy sync_stategy>
void tryCUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC(benchmark::State &state) {
const size_t num_elements = state.range(0);
if (num_elements % SEGMENT_SIZE) {
state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE");
return;
}
size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE;
const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;
half *d_in_fp16 = nullptr;
half *d_out = nullptr;
dim3 gridDim, blockDim;
blockDim.x = BLOCK_DIM;
gridDim.x = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
state.SkipWithError(
fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
.c_str());
return;
}
PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
PRINT_IF_ERROR(cudaMalloc(&d_out, 2 * sizeof(half)));
PRINT_IF_ERROR(cudaMemset(d_out, 0, 2 * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
defer(cudaEventDestroy(start));
defer(cudaEventDestroy(stop));
try {
for (auto _ : state) {
PRINT_IF_ERROR(cudaMemset(d_out, 0, 2 * sizeof(half)));
PRINT_IF_ERROR(cudaEventRecord(start));
if (sync_stategy == block_synchronization_stategy::synchronize_threads) {
compute_wmma_reduction_atomic_w_syncthreads<SEGMENT_SIZE,
WARPS_PER_BLOCK,
BLOCK_DIM>
<<<gridDim, blockDim>>>(d_in_fp16, d_out, num_segments);
} else if (sync_stategy == block_synchronization_stategy::atomic_ballot) {
compute_wmma_reduction_atomic_w_atomicballot<SEGMENT_SIZE,
WARPS_PER_BLOCK,
BLOCK_DIM>
<<<gridDim, blockDim>>>(d_in_fp16, d_out, num_segments);
}
PRINT_IF_ERROR(cudaEventRecord(stop));
PRINT_IF_ERROR(cudaEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"num_elements", num_elements},
{"num_segments", num_segments},
{"segment_size", SEGMENT_SIZE},
{"warps_per_block", WARPS_PER_BLOCK},
{"flops",
{state.iterations() * 1.0 * num_elements,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
half h_out;
PRINT_IF_ERROR(
cudaMemcpy(&h_out, d_out, sizeof(half), cudaMemcpyDeviceToHost));
int errors = 0;
float correct_sum = 0;
for (int i = 0; i < num_elements; i++) {
correct_sum += h_in[i];
}
if (fabs(half_to_float(h_out) - correct_sum) > 0.1) {
errors++;
if (errors < 10) {
printf("Expected Reuction = %f, got h_out_buf = %f\n", correct_sum,
half_to_float(h_out));
}
}
if (errors > 0) {
printf(
"CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC does not agree with SEQUENTIAL! %d errors!\n",
errors);
}
#endif
cudaFree(d_in_fp16);
cudaFree(d_out);
} catch (...) {
cudaFree(d_in_fp16);
cudaFree(d_out);
cudaDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
template <size_t SEGMENT_SIZE,
int WARPS_PER_BLOCK,
block_synchronization_stategy sync_stategy>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC(benchmark::State &state) {
cudaDeviceReset();
try {
tryCUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<SEGMENT_SIZE, WARPS_PER_BLOCK, sync_stategy>(
state);
} catch (const std::exception &e) {
state.SkipWithError(e.what());
} catch (const std::string &e) {
state.SkipWithError(e.c_str());
} catch (...) {
state.SkipWithError("unknown exception");
}
}
template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_BLOCK_SYNC(benchmark::State &state) {
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<
SEGMENT_SIZE,
WARPS_PER_BLOCK,
block_synchronization_stategy::synchronize_threads>(state);
}
template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_ATOMIC_BALLOT(benchmark::State &state) {
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC<SEGMENT_SIZE,
WARPS_PER_BLOCK,
block_synchronization_stategy::atomic_ballot>(
state);
}
#define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK) \
BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_ATOMIC_BALLOT, \
SEGMENT_SIZE, \
WARPS_PER_BLOCK) \
->ARGS() \
->UseManualTime(); \
BENCHMARK_TEMPLATE( \
CUDA_UNSAFE_WMMAFULL_REDUCTION_ATOMIC_W_BLOCK_SYNC, SEGMENT_SIZE, WARPS_PER_BLOCK) \
->ARGS() \
->UseManualTime()
#define BENCHMARK_REDUCTION(SEGMENT_SIZE) \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8); \
BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16)
BENCHMARK_REDUCTION(256);
BENCHMARK_REDUCTION(2 * 256);
BENCHMARK_REDUCTION(4 * 256);
BENCHMARK_REDUCTION(8 * 256);
BENCHMARK_REDUCTION(16 * 256);
// BENCHMARK_REDUCTION(32 * 256);
// BENCHMARK_REDUCTION(64 * 256);
// BENCHMARK_REDUCTION(128 * 256);
// BENCHMARK_REDUCTION(256 * 256);
// BENCHMARK_REDUCTION(512 * 256);
// BENCHMARK_REDUCTION(1024 * 256);
|
cc9a04e51b232c05f65075dd0b8664bf5f279984.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kinklin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
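// Launch-configuration sweep: blocks_ lists the (BLOCKX, BLOCKY) thread-block shapes and matrices_
// the (XSIZE, YSIZE) problem sizes; each combination is timed over 1000 launches after a
// 10-launch warm-up.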
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const double gamma = 1;
const double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kinklin), dim3(gridBlock), dim3(threadBlock), 0, 0, n, gamma, a, b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kinklin), dim3(gridBlock), dim3(threadBlock), 0, 0, n, gamma, a, b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kinklin), dim3(gridBlock), dim3(threadBlock), 0, 0, n, gamma, a, b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cc9a04e51b232c05f65075dd0b8664bf5f279984.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kinklin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const double gamma = 1;
const double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kinklin<<<gridBlock,threadBlock>>>(n,gamma,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kinklin<<<gridBlock,threadBlock>>>(n,gamma,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kinklin<<<gridBlock,threadBlock>>>(n,gamma,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
83a5eba4e3db20c478f5a2d9355fd0327c142653.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeellmv.cu, normal z -> s, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
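// Row-major ELLPACK: each row stores num_cols_per_row (value, column-index) pairs at
// dval/dcolind[num_cols_per_row * row + n]; zero-valued padding entries are skipped.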
__global__ void
sgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float lambda,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
int offset,
int blocksize,
magma_index_t * addrows,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
if ( row < blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeellmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
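/* Usage sketch (illustrative only): given an m x n matrix already stored in ELLPACK form
   (dval/dcolind padded to nnz_per_row entries per row) and device vectors dx, dy:
       magma_sgeellmv( MagmaNoTrans, m, n, nnz_per_row,
                       alpha, dval, dcolind, dx, beta, dy, queue );
   overwrites dy with alpha * A * dx + beta * dy. */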
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrix powers kernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
float lambda,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magma_int_t offset,
magma_int_t blocksize,
magmaIndex_ptr addrows,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeellmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
| 83a5eba4e3db20c478f5a2d9355fd0327c142653.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeellmv.cu, normal z -> s, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float lambda,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
int offset,
int blocksize,
magma_index_t * addrows,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
if ( row < blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgeellmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrix powers kernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
float lambda,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magma_int_t offset,
magma_int_t blocksize,
magmaIndex_ptr addrows,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgeellmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
|
00155da1ca3bb87ae81523148089a5b912e23c57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
GPU optimized versions for conf and change.
*/
#include "conf_gpu.h"
#include "model.h"
#include "gpu_debug.h"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
__global__ void scalar_mult_kernel(float mult, const int n, float *vals) {
CUDA_KERNEL_LOOP(index, n)
{
vals[index] *= mult;
}
}
size_t change_gpu::idx_cpu2gpu(size_t cpu_node_idx, size_t offset_in_node,
const gpu_data& d) {
size_t gpu_node_idx = d.node_idx_cpu2gpu(cpu_node_idx);
constexpr size_t extra_floats_per_lig_root = 5;
size_t lig_roots_before_node = min(gpu_node_idx, d.nlig_roots);
size_t gpu_flat_idx = gpu_node_idx
+ extra_floats_per_lig_root * lig_roots_before_node + offset_in_node;
return gpu_flat_idx;
}
// CPU conf torsions are stored in dfs order, relative to the model's
// trees. GPU conf torsions are in bfs order.
size_t conf_gpu::idx_cpu2gpu(size_t cpu_node_idx, size_t offset_in_node,
const gpu_data& d) {
size_t gpu_node_idx = d.node_idx_cpu2gpu(cpu_node_idx);
constexpr size_t extra_floats_per_lig_root = 6;
size_t lig_roots_before_node = min(gpu_node_idx, d.nlig_roots);
size_t gpu_flat_idx = gpu_node_idx
+ extra_floats_per_lig_root * lig_roots_before_node + offset_in_node;
return gpu_flat_idx;
}
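/* Editor's note - worked example, not in the original source. With a single
ligand root (d.nlig_roots == 1) the root occupies 7 conf floats (position x,y,z
plus a 4-component quaternion) but only 6 change floats (position plus a
3-component rotation vector), which is what the extra_floats_per_lig_root
constants above encode:
conf_gpu::idx_cpu2gpu: node 0, offset 0..6 -> flat 0..6 (root block)
node k >= 1, offset 0 -> k + 6*1 (k-th torsion, BFS order)
change_gpu::idx_cpu2gpu: node 0, offset 0..5 -> flat 0..5
node k >= 1, offset 0 -> k + 5*1
The same strides reappear in conf_gpu::increment below (conf_offset = idx*7 and
change_offset = idx*6 for the ligand roots). */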
change_gpu::change_gpu(const change& src, const gpu_data& d,
device_buffer& buffer)
: n(src.num_floats()) {
std::unique_ptr<fl[]> data(new fl[n]);
for (sz i = 0; i < num_floats(); i++) {
sz cpu_node_idx;
sz offset_in_node;
fl cpu_val = src.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 6);
assert(cpu_node_idx < n);
data[change_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node, d)] = cpu_val;
}
values = buffer.copy(data.get(), n, hipMemcpyHostToDevice);
}
//allocate and copy
change_gpu::change_gpu(const change_gpu& src, device_buffer& buffer)
: n(src.n), values(NULL) {
values = buffer.copy(src.values, n + 1, hipMemcpyDeviceToDevice);
}
__device__ change_gpu& change_gpu::operator=(const change_gpu& src) {
assert(values && n == src.n);
#ifndef __CUDA_ARCH__
CUDA_CHECK_GNINA(definitelyPinnedMemcpy(values, src.values, sizeof(float) * n,
hipMemcpyDeviceToDevice));
#else
memcpy(values, src.values, sizeof(float) * n);
#endif
return *this;
}
//dkoes - zeros out all differences
__device__ void change_gpu::clear() {
memset(values, 0, sizeof(float) * n);
}
//dkoes - multiply by -1
void change_gpu::invert() {
hipLaunchKernelGGL(( scalar_mult_kernel), dim3(1), dim3(min(GNINA_CUDA_NUM_THREADS, n)), 0, 0, -1.0, n, values);
}
//return dot product
__device__ float change_gpu::dot(const change_gpu& rhs) const {
__shared__ float out;
//TODO: n is no longer necessarily small
if (threadIdx.x < WARPSIZE) {
int start = threadIdx.x;
float val = 0.0;
for (int i = start; i < n; i += WARPSIZE)
{
val += values[i] * rhs.values[i];
}
//now warp reduce with shuffle
for (uint offset = WARPSIZE >> 1; offset > 0; offset >>= 1)
val += shuffle_down(val, offset);
if (start == 0) out = val;
}
__syncthreads();
return out;
}
//subtract rhs from this
__device__ void change_gpu::sub(const change_gpu& rhs) {
int nthreads = blockDim.x < n ? blockDim.x : n;
for (int index = threadIdx.x; index < n; index += nthreads)
values[index] -= rhs.values[index];
}
__device__
void change_gpu::minus_mat_vec_product(const flmat_gpu& m,
change_gpu& out) const {
int idx = threadIdx.x;
fl sum = 0;
VINA_FOR(j,n)
sum += m(m.index_permissive(idx, j)) * values[j];
out.values[idx] = -sum;
}
__host__ __device__ sz change_gpu::num_floats() const {
return n;
}
inline
bool constructor_valid(const conf_gpu& gpu, const conf& src,
const gpu_data& d) {
conf test_dst = src;
gpu.set_cpu(test_dst, d);
sz n = src.num_floats();
assert(n == test_dst.num_floats());
assert(test_dst == src);
return true;
}
conf_gpu::conf_gpu(const conf& src, const gpu_data& d, device_buffer& buffer)
: n(src.num_floats()) {
std::unique_ptr<fl[]> data(new fl[n]);
for (sz i = 0; i < n; i++) {
sz cpu_node_idx;
sz offset_in_node;
fl cpu_val = src.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 7);
assert(cpu_node_idx < n);
data[conf_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node, d)] = cpu_val;
}
values = buffer.copy(data.get(), n, hipMemcpyHostToDevice);
assert(constructor_valid(*this, src, d));
}
//set cpu to gpu values, assumes correctly sized
void conf_gpu::set_cpu(conf& dst, const gpu_data& d) const {
std::vector<fl> data;
get_data(data);
for (sz i = 0; i < n; i++) {
// TODO: need get_with_node_idx for node_idx, but need operator()
// for writeable-ref.
sz cpu_node_idx;
sz offset_in_node;
dst.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 7);
assert(cpu_node_idx < n);
dst.flat_index(i) = data[conf_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node,
d)];
}
}
//copy within buffer
conf_gpu::conf_gpu(const conf_gpu& src, device_buffer& buffer)
: n(src.n), values(NULL) {
values = buffer.copy(src.values, n, hipMemcpyDeviceToDevice);
}
__host__ __device__ conf_gpu& conf_gpu::operator=(const conf_gpu& src) {
assert(values && n == src.n);
#ifndef __CUDA_ARCH__
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(values, src.values, sizeof(float) * n,
hipMemcpyDeviceToDevice));
#else
memcpy(values, src.values, sizeof(float) * n);
#endif
return *this;
}
__device__ void conf_gpu::increment(const change_gpu& c, fl factor,
gpu_data* gdata) {
unsigned idx = threadIdx.x;
tree_gpu& tree = *gdata->treegpu;
unsigned lig_roots = tree.nlig_roots;
//update rigid with early threads
if (idx < lig_roots) {
//position
unsigned conf_offset = idx * 7;
unsigned change_offset = idx * 6;
for (int i = 0; i < 3; i++)
values[conf_offset + i] += c.values[change_offset + i] * factor;
//rotation
qt orientation(values[conf_offset + 3], values[conf_offset + 4],
values[conf_offset + 5], values[conf_offset + 6]);
vec rotation(factor * c.values[change_offset + 3],
factor * c.values[change_offset + 4],
factor * c.values[change_offset + 5]);
quaternion_increment(orientation, rotation);
values[conf_offset + 3] = orientation.R_component_1();
values[conf_offset + 4] = orientation.R_component_2();
values[conf_offset + 5] = orientation.R_component_3();
values[conf_offset + 6] = orientation.R_component_4();
}
//torsions updated by everybody else, with indexing to avoid touching rigid again
else
if (idx < tree.num_nodes) {
unsigned conf_offset = idx + (6 * lig_roots);
unsigned change_offset = idx + (5 * lig_roots);
values[conf_offset] += normalized_angle(factor * c.values[change_offset]);
normalize_angle(values[conf_offset]);
}
}
void conf_gpu::get_data(std::vector<float>& d) const {
d.resize(n);
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(&d[0], values, n * sizeof(float),
hipMemcpyDeviceToHost));
}
void conf_gpu::set_data(std::vector<float>& d) const {
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(values, &d[0], n * sizeof(float),
hipMemcpyHostToDevice));
}
__device__
void change_gpu::print() const {
pretty_print_array(values, n, "change_gpu", "%f");
}
__device__
void conf_gpu::print() const {
pretty_print_array(values, n, "conf_gpu", "%f");
}
| 00155da1ca3bb87ae81523148089a5b912e23c57.cu | /*
GPU optimized versions for conf and change.
*/
#include "conf_gpu.h"
#include "model.h"
#include "gpu_debug.h"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
__global__ void scalar_mult_kernel(float mult, const int n, float *vals) {
CUDA_KERNEL_LOOP(index, n)
{
vals[index] *= mult;
}
}
size_t change_gpu::idx_cpu2gpu(size_t cpu_node_idx, size_t offset_in_node,
const gpu_data& d) {
size_t gpu_node_idx = d.node_idx_cpu2gpu(cpu_node_idx);
constexpr size_t extra_floats_per_lig_root = 5;
size_t lig_roots_before_node = min(gpu_node_idx, d.nlig_roots);
size_t gpu_flat_idx = gpu_node_idx
+ extra_floats_per_lig_root * lig_roots_before_node + offset_in_node;
return gpu_flat_idx;
}
// CPU conf torsions are stored in dfs order, relative to the model's
// trees. GPU conf torsions are in bfs order.
size_t conf_gpu::idx_cpu2gpu(size_t cpu_node_idx, size_t offset_in_node,
const gpu_data& d) {
size_t gpu_node_idx = d.node_idx_cpu2gpu(cpu_node_idx);
constexpr size_t extra_floats_per_lig_root = 6;
size_t lig_roots_before_node = min(gpu_node_idx, d.nlig_roots);
size_t gpu_flat_idx = gpu_node_idx
+ extra_floats_per_lig_root * lig_roots_before_node + offset_in_node;
return gpu_flat_idx;
}
change_gpu::change_gpu(const change& src, const gpu_data& d,
device_buffer& buffer)
: n(src.num_floats()) {
std::unique_ptr<fl[]> data(new fl[n]);
for (sz i = 0; i < num_floats(); i++) {
sz cpu_node_idx;
sz offset_in_node;
fl cpu_val = src.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 6);
assert(cpu_node_idx < n);
data[change_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node, d)] = cpu_val;
}
values = buffer.copy(data.get(), n, cudaMemcpyHostToDevice);
}
//allocate and copy
change_gpu::change_gpu(const change_gpu& src, device_buffer& buffer)
: n(src.n), values(NULL) {
values = buffer.copy(src.values, n + 1, cudaMemcpyDeviceToDevice);
}
__device__ change_gpu& change_gpu::operator=(const change_gpu& src) {
assert(values && n == src.n);
#ifndef __CUDA_ARCH__
CUDA_CHECK_GNINA(definitelyPinnedMemcpy(values, src.values, sizeof(float) * n,
cudaMemcpyDeviceToDevice));
#else
memcpy(values, src.values, sizeof(float) * n);
#endif
return *this;
}
//dkoes - zeros out all differences
__device__ void change_gpu::clear() {
memset(values, 0, sizeof(float) * n);
}
//dkoes - multiply by -1
void change_gpu::invert() {
scalar_mult_kernel<<<1, min(GNINA_CUDA_NUM_THREADS, n)>>>(-1.0, n, values);
}
//return dot product
__device__ float change_gpu::dot(const change_gpu& rhs) const {
__shared__ float out;
//TODO: n is no longer necessarily small
if (threadIdx.x < WARPSIZE) {
int start = threadIdx.x;
float val = 0.0;
for (int i = start; i < n; i += WARPSIZE)
{
val += values[i] * rhs.values[i];
}
//now warp reduce with shuffle
for (uint offset = WARPSIZE >> 1; offset > 0; offset >>= 1)
val += shuffle_down(val, offset);
if (start == 0) out = val;
}
__syncthreads();
return out;
}
//subtract rhs from this
__device__ void change_gpu::sub(const change_gpu& rhs) {
int nthreads = blockDim.x < n ? blockDim.x : n;
for (int index = threadIdx.x; index < n; index += nthreads)
values[index] -= rhs.values[index];
}
__device__
void change_gpu::minus_mat_vec_product(const flmat_gpu& m,
change_gpu& out) const {
int idx = threadIdx.x;
fl sum = 0;
VINA_FOR(j,n)
sum += m(m.index_permissive(idx, j)) * values[j];
out.values[idx] = -sum;
}
__host__ __device__ sz change_gpu::num_floats() const {
return n;
}
inline
bool constructor_valid(const conf_gpu& gpu, const conf& src,
const gpu_data& d) {
conf test_dst = src;
gpu.set_cpu(test_dst, d);
sz n = src.num_floats();
assert(n == test_dst.num_floats());
assert(test_dst == src);
return true;
}
conf_gpu::conf_gpu(const conf& src, const gpu_data& d, device_buffer& buffer)
: n(src.num_floats()) {
std::unique_ptr<fl[]> data(new fl[n]);
for (sz i = 0; i < n; i++) {
sz cpu_node_idx;
sz offset_in_node;
fl cpu_val = src.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 7);
assert(cpu_node_idx < n);
data[conf_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node, d)] = cpu_val;
}
values = buffer.copy(data.get(), n, cudaMemcpyHostToDevice);
assert(constructor_valid(*this, src, d));
}
//set cpu to gpu values, assumes correctly sized
void conf_gpu::set_cpu(conf& dst, const gpu_data& d) const {
std::vector<fl> data;
get_data(data);
for (sz i = 0; i < n; i++) {
// TODO: need get_with_node_idx for node_idx, but need operator()
// for writeable-ref.
sz cpu_node_idx;
sz offset_in_node;
dst.get_with_node_idx(i, &cpu_node_idx, &offset_in_node);
assert(offset_in_node < 7);
assert(cpu_node_idx < n);
dst.flat_index(i) = data[conf_gpu::idx_cpu2gpu(cpu_node_idx, offset_in_node,
d)];
}
}
//copy within buffer
conf_gpu::conf_gpu(const conf_gpu& src, device_buffer& buffer)
: n(src.n), values(NULL) {
values = buffer.copy(src.values, n, cudaMemcpyDeviceToDevice);
}
__host__ __device__ conf_gpu& conf_gpu::operator=(const conf_gpu& src) {
assert(values && n == src.n);
#ifndef __CUDA_ARCH__
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(values, src.values, sizeof(float) * n,
cudaMemcpyDeviceToDevice));
#else
memcpy(values, src.values, sizeof(float) * n);
#endif
return *this;
}
__device__ void conf_gpu::increment(const change_gpu& c, fl factor,
gpu_data* gdata) {
unsigned idx = threadIdx.x;
tree_gpu& tree = *gdata->treegpu;
unsigned lig_roots = tree.nlig_roots;
//update rigid with early threads
if (idx < lig_roots) {
//position
unsigned conf_offset = idx * 7;
unsigned change_offset = idx * 6;
for (int i = 0; i < 3; i++)
values[conf_offset + i] += c.values[change_offset + i] * factor;
//rotation
qt orientation(values[conf_offset + 3], values[conf_offset + 4],
values[conf_offset + 5], values[conf_offset + 6]);
vec rotation(factor * c.values[change_offset + 3],
factor * c.values[change_offset + 4],
factor * c.values[change_offset + 5]);
quaternion_increment(orientation, rotation);
values[conf_offset + 3] = orientation.R_component_1();
values[conf_offset + 4] = orientation.R_component_2();
values[conf_offset + 5] = orientation.R_component_3();
values[conf_offset + 6] = orientation.R_component_4();
}
//torsions updated by everybody else, with indexing to avoid touching rigid again
else
if (idx < tree.num_nodes) {
unsigned conf_offset = idx + (6 * lig_roots);
unsigned change_offset = idx + (5 * lig_roots);
values[conf_offset] += normalized_angle(factor * c.values[change_offset]);
normalize_angle(values[conf_offset]);
}
}
void conf_gpu::get_data(std::vector<float>& d) const {
d.resize(n);
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(&d[0], values, n * sizeof(float),
cudaMemcpyDeviceToHost));
}
void conf_gpu::set_data(std::vector<float>& d) const {
CUDA_CHECK_GNINA(
definitelyPinnedMemcpy(values, &d[0], n * sizeof(float),
cudaMemcpyHostToDevice));
}
__device__
void change_gpu::print() const {
pretty_print_array(values, n, "change_gpu", "%f");
}
__device__
void conf_gpu::print() const {
pretty_print_array(values, n, "conf_gpu", "%f");
}
|
08b6a4b0623a4d91c23e3976e39ba237269ba03c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _GPU_MINER_KERNEL_CUH_
#define _GPU_MINER_KERNEL_CUH_
//#include "gpu_multiVectorAnd.cu"
#include "gpu_vectorAndInit.cuh"
#include "CUDA_header.cuh"
////////////////////////////////////// Global Variable /////////////////////////
double copyTime = 0.0;
double countTime = 0.0;
double kernelTime = 0.0;
static __device__ __constant__ table_t c_byteTable[TABLE_SIZE];//constant memory does not need cudaMalloc
int* d_id2List;
bytes_t* d_buf;
table_t* g_byteTable;
int* d_in1List;
int* d_in2List;
int* d_in2ListBound;
__global__
void multi_AND_wS_woC_kernel(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
bytes_t* d_out)
{
//1. load the d_in1List, d_in2List and d_in2ListBound to the shared memory
extern __shared__ int s_data[];
int* s_in1List = s_data; //len = in1ListLen
int* s_in2ListBound = s_data + in1ListLen; //len = in1ListLen
int* s_in2List = s_data + 2*in1ListLen; //len = in2ListLen
//note: this loop could probably avoid using threadIdx.x
for( int i = threadIdx.x; i < in1ListLen; i += blockDim.x )
{
s_in1List[i] = d_in1List[i];
s_in2ListBound[i] = d_in2ListBound[i];
}
for( int i = threadIdx.x; i < in2ListLen; i += blockDim.x )
{
//each entry of s_in2List stores an item id
s_in2List[i] = d_in2List[i];
}
__syncthreads();
//2.
int outOffset = 0; //for output, also for get the d_segIn2List
bytes_t* d_segIn1;//pointer to d_in1 for current segment
int* d_segIn2List; //pointer to d_in2List for current segment
int segIn2ListLen;
int segOutLen = 0;
const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int delta = blockDim.x*gridDim.x;
int idid2, offset, posInIn2;
for( int id1 = 0; id1 < in1ListLen; id1++ )
{
//d_segIn1 points to the segment of d_in1 currently being processed, i.e. the 0/1 transaction bits of the current parent node
d_segIn1 = d_in1 + segLen*(s_in1List[id1]);
//the next three lines set up the inner loop over the children whose parent is id1; the GPU processes s_in2ListBound[id1] (roughly 200-400) parent-child AND operations at once,
//each thread handling the AND of 16 bits of data
d_segIn2List = s_in2List + outOffset;
segIn2ListLen = s_in2ListBound[id1];
segOutLen = segLen*segIn2ListLen;//segIn2ListLen is a value below 1000
//for each segment, each thread handles the AND of 16 bits of data
//index ranges over 0-1536 and delta is 1536; segOutLen 16-bit words must be processed here, and because s_in2ListBound[id1] parent-child AND operations are computed at the same time the index handling is somewhat involved
for( int i = index; i < segOutLen; i += delta )
{
//idid2 indicates which child node this element belongs to
idid2 = i/segLen;
//offset is kept in sync between d_segIn1 and d_in2; offset ranges over 0-(segLen-1), which guarantees the two items being ANDed line up
offset = i - idid2*segLen;
//posInIn2 is the index of the child's 16-bit word under the current parent node; it must correspond to the matching parent, and d_segIn2List[idid2] records the item id of the child
//the same offset is used so that the parent and child words being ANDed stay aligned
posInIn2 = d_segIn2List[idid2]*segLen + offset;
//d_out holds the intermediate results; its capacity is 2*segLen*1500*15 (the 1500 should be related to the 1000 bound), where 15 is the maximum level value - see the levelPos array to understand why
//segOutLen < the capacity of d_out
//alternatively, d_out accesses could go through constant memory
//store the ANDed value into d_out; outOffset*segLen locates all children of the current parent node within the intermediate results
d_out[i + outOffset*segLen] = d_segIn1[offset]&d_in2[posInIn2];
}
outOffset += (s_in2ListBound[id1]);
}
}
__global__
void multi_AND_woS_wC_kernel(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
table_t* d_countOut )
{
//1. load the d_in1List, d_in2List and d_in2ListBound to the shared memory
extern __shared__ int s_data[];
int* s_in1List = s_data; //len = in1ListLen
int* s_in2ListBound = s_data + in1ListLen; //len = in1ListLen
int* s_in2List = s_data + 2*in1ListLen; //len = in2ListLen
for( int i = threadIdx.x; i < in1ListLen; i += blockDim.x )
{
s_in1List[i] = d_in1List[i];
s_in2ListBound[i] = d_in2ListBound[i];
}
for( int i = threadIdx.x; i < in2ListLen; i += blockDim.x )
{
s_in2List[i] = d_in2List[i];
}
__syncthreads();
//2.
int outOffset = 0; //for output, also for get the d_segIn2List
bytes_t* d_segIn1;//pointer to d_in1 for current segment
int* d_segIn2List; //pointer to d_in2List for current segment
int segIn2ListLen;
int segOutLen = 0;
const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int delta = blockDim.x*gridDim.x;
int idid2, offset, posInIn2;
bytes_t out;
for( int id1 = 0; id1 < in1ListLen; id1++ )
{
d_segIn1 = d_in1 + segLen*(s_in1List[id1]);
d_segIn2List = s_in2List + outOffset;
segIn2ListLen = s_in2ListBound[id1];
segOutLen = segLen*segIn2ListLen;
//for each segment
for( int i = index; i < segOutLen; i += delta )
{
idid2 = i/segLen;
//offset is kept in sync between d_segIn1 and d_in2; offset ranges over 0-(segLen-1), which guarantees the two items being ANDed line up
offset = i - idid2*segLen;
posInIn2 = d_segIn2List[idid2]*segLen + offset;
out = d_segIn1[offset]&d_in2[posInIn2];
//at the last level, evaluate the final result directly by counting how many 1 bits it contains
d_countOut[i + outOffset*segLen] = c_byteTable[out];
//d_out[i + outOffset*segLen] = d_segIn1[offset]&d_in2[posInIn2];
}
outOffset += (s_in2ListBound[id1]);
}
}
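/* Editor's note - sketch under assumptions. c_byteTable is used above as a
per-word popcount lookup (d_countOut[...] = c_byteTable[out]). h_byteTable is
defined in gpu_vectorAndInit.cuh, which is not part of this file; assuming it
is the usual 16-bit population-count table (TABLE_SIZE == 1 << 16), it could be
filled on the host like this:
for (int v = 0; v < TABLE_SIZE; ++v) {
int bits = 0;
for (int b = v; b > 0; b >>= 1) bits += b & 1;
h_byteTable[v] = (table_t)bits; // number of transactions covered by word v
}
*/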
////////////////////////////////////////////////////////////////////////////////
//! GPU primitive for vector and operation for multiple d_in1 and multiple d_in2
//! @param d_in1
//! @param d_in1List list contains the id in d_in1
//! @param in1ListLen
//! @param d_in2
//! @param d_in2List list contains the id in d_in2
//! @param in2ListLen in2ListLen >= in1ListLen
//! @param d_in2ListBoud number of corresponding ids in d_in2 for each id in d_in1, len = in1ListLen
//! @param segLen size of each segment for AND operation
//! @param d_out, outLen = in2ListLen*segLen
////////////////////////////////////////////////////////////////////////////////
void multi_AND_wS_woC(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
bytes_t* d_out,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
//store the d_in1List, d_in2List and d_in2ListBound to the shared memory
unsigned int sharedSize = sizeof(int)*( 2*in1ListLen + in2ListLen );
hipLaunchKernelGGL(( multi_AND_wS_woC_kernel), dim3(numBlock), dim3(numThread), sharedSize, 0, d_in1, d_in1List, in1ListLen,
d_in2, d_in2List, in2ListLen, d_in2ListBound, segLen, d_out);
CUT_CHECK_ERROR( "multi_AND_wS_woC_kernel" );
SYNC();
}
void multi_AND_wS_woC_hostList(bytes_t* d_in1,
const int* h_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* h_in2List, const int in2ListLen,
const int* h_in2ListBound,
const int segLen,
bytes_t* d_out,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
unsigned int copyTimer = 0;
startTimer( ©Timer );
TOGPU( d_in1List, h_in1List, sizeof(int)*in1ListLen );
TOGPU( d_in2ListBound, h_in2ListBound, sizeof(int)*in1ListLen );
TOGPU( d_in2List, h_in2List, sizeof(int)*in2ListLen );
copyTime += endTimer( "", ©Timer );
unsigned int kernelTimer = 0;
startTimer( &kernelTimer );
multi_AND_wS_woC(d_in1, d_in1List,
in1ListLen,
d_in2,
d_in2List, in2ListLen,
d_in2ListBound,
segLen,
d_out);
kernelTime += endTimer( "", &kernelTimer );
}
//!!!!!
void multi_AND_woS_wC(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen /**/,
const int* d_in2ListBound,
const int segLen,
table_t* d_countOut,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
//store the d_in1List, d_in2List and d_in2ListBound to the shared memory
unsigned int sharedSize = sizeof(int)*( 2*in1ListLen + in2ListLen );
hipLaunchKernelGGL(( multi_AND_woS_wC_kernel), dim3(numBlock), dim3(numThread), sharedSize, 0, d_in1, d_in1List, in1ListLen,
d_in2, d_in2List, in2ListLen, d_in2ListBound, segLen, d_countOut);
CUT_CHECK_ERROR( "multi_AND_wS_woC_kernel" );
SYNC();
}
void multi_AND_woS_wC_hostList(bytes_t* d_in1,
const int* h_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* h_in2List, const int in2ListLen /**/,
const int* h_in2ListBound,
const int segLen,
table_t* d_countOut,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
unsigned int copyTimer = 0;
startTimer( ©Timer );
TOGPU( d_in1List, h_in1List, sizeof(int)*in1ListLen );
TOGPU( d_in2ListBound, h_in2ListBound, sizeof(int)*in1ListLen );
TOGPU( d_in2List, h_in2List, sizeof(int)*in2ListLen );
copyTime += endTimer( "", ©Timer );
unsigned int kernelTimer = 0;
startTimer( &kernelTimer );
multi_AND_woS_wC(d_in1,
d_in1List,
in1ListLen,
d_in2,
d_in2List, in2ListLen /**/,
d_in2ListBound,
segLen,
d_countOut
);
kernelTime += endTimer( "", &kernelTimer );
}
bytes_t* d_bitmap;
bytes_t* d_midRes;
table_t* d_byteTable;
int itemSize;
int numTran;
int numTranInByte;
bytes_t* d_multiOut;
bytes_t* h_multiOut;
table_t* d_multiCountOut;
table_t* h_multiCountOut;
int* d_countBuf;
#define CPU_COUNT
//#define GPU_MINER_DEBUG
template<class T>
void copyFromGPU( T** h_out, T* d_in, unsigned int len )
{
CPUMALLOC( (void**)&(*h_out), sizeof(T)*len );
FROMGPU( (*h_out), d_in, sizeof(T)*len );
}
template<class T>
void copyToGPU( T** d_out, T* h_in, unsigned int len )
{
GPUMALLOC( (void**)&(*d_out), sizeof(T)*len );
TOGPU( (*d_out), h_in, sizeof(T)*len );
}
//////////////////////////////////////// The basic GPU functions //////////////////
extern "C"
void GPUInit( int argc, char** argv )
{
CUT_DEVICE_INIT( argc, argv );
}
extern "C"
void gpuMalloc( void** gpu_data, unsigned int sizeInByte )
{
GPUMALLOC( gpu_data, sizeInByte );
}
extern "C"
void copyCPUToGPU( void* cpu_data, void* gpu_data, unsigned int sizeInByte )
{
TOGPU( gpu_data, cpu_data, sizeInByte );
}
extern "C"
void copyCPUToGPUConstant( void* cpu_data, void* gpu_data, unsigned int sizeInByte )
{
CUDA_SAFE_CALL( hipMemcpyToSymbol( gpu_data, cpu_data, sizeInByte ) );
}
extern "C"
void copyGPUToCPU( void* gpu_data, void* cpu_data, unsigned int sizeInByte )
{
FROMGPU( cpu_data, gpu_data, sizeInByte );
}
extern "C"
void GPUFree( void* gpu_data )
{
CUDA_SAFE_CALL( hipFree( gpu_data ) );
}
/////////////////////////////////////////////// GPUMiner ////////////////////////
extern "C"
void GPUMiner_Free()
{
GPUFREE( d_bitmap );
GPUFREE( d_midRes );
GPUFREE( d_byteTable );
GPUFREE( d_multiCountOut );
#ifdef CPU_COUNT
CPUFREE( h_multiCountOut );
#endif
}
extern "C"
void arrayAndInit( const unsigned int maxListLen, const unsigned int numTranInByte )
{
//use constant memory to improve efficiency: h_byteTable is placed in GPU constant memory as a lookup table
CUDA_SAFE_CALL( hipMemcpyToSymbol( c_byteTable, h_byteTable, sizeof(table_t)*TABLE_SIZE ) );
GPUMALLOC( (void**)&d_id2List, sizeof(int)*maxListLen ); //maxListLen is initially the number of frequent 1-itemsets
GPUMALLOC( (void**)&d_in1List, sizeof(int)*NUM_MAX_NODE_PER_CALL );
GPUMALLOC( (void**)&d_in2List, sizeof(int)*NUM_MAX_NODE_PER_CALL );
GPUMALLOC( (void**)&d_in2ListBound, sizeof(int)*NUM_MAX_NODE_PER_CALL );
#ifdef USE_GATHER_FOR_COUNT
GPUMALLOC( (void**)&g_byteTable, sizeof(table_t)*TABLE_SIZE );
TOGPU( g_byteTable, h_byteTable, sizeof(table_t)*TABLE_SIZE );
GPUMALLOC( (void**)&d_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
#endif
#ifdef USE_CONSTANT_BUF
CPUMALLOC( (void**)&hh_in, sizeof(bytes_t)*IN_SIZE );
#endif
#ifdef USE_COPY
GPUMALLOC( (void**)&d_buf, sizeof(bytes_t)*maxListLen*numTranInByte );
#endif
}
//initialize the GPU for mining
/*
* h_numTran: number of transactions
* h_itemSize: number of frequent 1-itemsets
*/
extern "C"
void initGPUMiner( bytes_t* h_matrix, const unsigned int h_itemSize, const unsigned int h_numTran )
{
numTran = h_numTran;
numTranInByte = (int)ceil((float)numTran/NUM_BIT_IN_BYTE);//NUM_BIT_IN_BYTE = 16, rounded up; why is a "byte" defined here as having 16 bits?
itemSize = h_itemSize;
unsigned int memTimer = 0;
startTimer( &memTimer );
//allocate GPU memory, sized roughly as (bytes per transaction) * (number of frequent 1-items)
GPUMALLOC( (void**)&d_bitmap, sizeof(bytes_t)*numTranInByte*itemSize );//multiply by sizeof(bytes_t) because of the extra division by 2 above, though it is not clear why it is done this way
//copy h_matrix from the CPU into d_bitmap
TOGPU( d_bitmap, h_matrix, sizeof(bytes_t)*numTranInByte*itemSize);
//allocate GPU memory for the intermediate results, midRes
//GPUMALLOC( (void**)&d_midRes, sizeof(bytes_t)*numTranInByte*itemSize*NUM_MAX_LEVEL );
//GPUInit<bytes_t>( d_midRes, numTranInByte*itemSize, 0 );
GPUMALLOC( (void**)&d_midRes, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL );
CUDA_SAFE_CALL( hipMemset(d_midRes, 0, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL) );
//initialize for the andVector
arrayAndInit( itemSize, numTranInByte);
GPUMALLOC( (void**)&d_multiCountOut, sizeof(table_t)*numTranInByte*NUM_MAX_NODE_PER_CALL ); ///////////////!!!!!!! can be deleted
#ifdef CPU_COUNT
GPUMALLOC( (void**)&d_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
CPUMALLOC( (void**)&h_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
CPUMALLOC( (void**)&h_multiCountOut, sizeof(table_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
#else
GPUMALLOC( (void**)&d_countBuf, sizeof(int)*NUM_MAX_NODE_PER_CALL );
#endif
copyTime = endTimer("", &memTimer);
}
int levelPos[NUM_MAX_LEVEL] = {0};
//update the level size
inline void updateLevelSize( int levelIdx, const int levelSize )
{
levelPos[levelIdx] = levelPos[levelIdx - 1] + levelSize;
}
//get the starting address of offset in levelIdx
//i.e. the offset of entry 'offset' from the start address of level levelIdx
inline int getLevelPos( const int levelIdx, const int offset )
{
return levelPos[levelIdx] + offset*numTranInByte;
}
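/* Editor's note - worked example, not in the original source. levelPos[L]
marks (in bytes_t words) where the results written with midRltStoreLevel == L+1
begin inside d_midRes. Writing level 2 places its bitmaps at levelPos[1]; if
that level holds, say, 300 candidates, updateLevelSize(2, 300*numTranInByte)
sets levelPos[2] = levelPos[1] + 300*numTranInByte, i.e. where level 3's
results will start. Reading candidate k of the level just written then uses
getLevelPos(1, k) = levelPos[1] + k*numTranInByte, which matches the
getLevelPos(midRltStoreLevel - 2, midRltBeginPos) call in gpuBoundVectorAnd. */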
//left: int midRltBeginPos, int *parentList
//if midRltBeginPos = -1, parentList->matrix
//if midRltBeginPos != -1, midRest's offset, last level
//right:
//itemIdList: matrix
//int *itemLenList, int pairNum
//midRltStoreLevel: if no counting, store
//|paraentList| and |itemLenList| = pairNum
//|itemIdList| = itemIdListLen
extern "C"
void gpuBoundVectorAnd( const int midRltBeginPos, const int *parentList, const int *itemLenList, const int pairNum,
const int *itemIdList, const int itemIdListLen, const int midRltStoreLevel,
const bool countSup, int *supList)
{
bytes_t* d_in1;
bytes_t* d_in2;
if( midRltBeginPos == -1 )
//if(!countSup)
{
d_in1 = d_bitmap; //from the bitmap
}
else
{
//need to start from the intermediate results
//e.g. when level=3, d_in1 = d_midRes + getLevelPos(1,0) (max_level=15); d_in1 points to the start of the intermediate results
/*
inline int getLevelPos( const int levelIdx, const int offset )
{
return levelPos[levelIdx] + offset*numTranInByte;
}
*/
//
d_in1 = d_midRes + getLevelPos( midRltStoreLevel - 2, midRltBeginPos );//from the midRes
}
d_in2 = d_bitmap;
#ifdef GPU_MINER_DEBUG
bytes_t* h_in1;
bytes_t* h_in2;
copyFromGPU<bytes_t>( &h_in1, d_in1, numTranInByte*pairNum );
copyFromGPU<bytes_t>( &h_in2, d_in2, numTranInByte*itemSize );
int a = 1;
#endif
if( countSup )//no store, do counting
{
//unsigned int kernelTimer = 0;
//startTimer( &kernelTimer );
multi_AND_woS_wC_hostList(d_in1, parentList, pairNum,
d_in2,
itemIdList, itemIdListLen,
itemLenList,
numTranInByte,
d_multiCountOut );
//kernelTime += endTimer( "", &kernelTimer );
#ifdef CPU_COUNT
unsigned int copyTimer = 0;
startTimer( ©Timer );
//copy d_multiCountOut from the GPU to the CPU
FROMGPU( h_multiCountOut, d_multiCountOut, sizeof(table_t)*numTranInByte*itemIdListLen );
copyTime += endTimer("", ©Timer);
unsigned int countTimer = 0;
startTimer( &countTimer );
for( int i = 0; i < itemIdListLen; i++ )
{
int sum = 0;
for( int j = 0; j < numTranInByte; j++ )
{
sum += h_multiCountOut[i*numTranInByte + j];
}
supList[i] = sum;
}
countTime += endTimer( "", &countTimer );
#else
unsigned int countTimer = 0;
startTimer( &countTimer );
fixlenSum_v2<table_t, int>( d_multiCountOut, d_countBuf, numTranInByte, itemIdListLen, supList );
countTime += endTimer( "", &countTimer );
#endif
}
else //store, no counting
{
//when midRltStoreLevel = 2
bytes_t* d_out = d_midRes + levelPos[midRltStoreLevel - 1];
multi_AND_wS_woC_hostList( d_in1, parentList, pairNum,
d_in2,
itemIdList, itemIdListLen,
itemLenList,
numTranInByte,
d_out
);
//the intermediate results occupy itemIdListLen*numTranInByte 16-bit words
/*
inline void updateLevelSize( int levelIdx, const int levelSize )
{
levelPos[levelIdx] = levelPos[levelIdx - 1] + levelSize;
}
*/
//updateLevelSize updates levelPos at the current depth: the previous depth's value plus itemIdListLen*numTranInByte
updateLevelSize( midRltStoreLevel, itemIdListLen*numTranInByte );//this records where the next level's intermediate results start
}
#ifdef GPU_MINER_DEBUG
bytes_t* h_midRes;
copyFromGPU<bytes_t>( &h_midRes, d_midRes, numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL );
a = 1;
#endif
}
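/* Editor's note - hypothetical usage sketch; the values below are invented for
illustration and do not come from the original code. Counting the support of
five level-3 candidates whose two level-2 parents sit at offsets 0 and 1 of the
previous level's intermediate results:
int parents[] = { 0, 1 }; // offsets into the level-2 results
int lens[] = { 3, 2 }; // extensions per parent
int items[] = { 4, 5, 6, 5, 6 }; // frequent-1-item ids of the extensions
int sup[5];
gpuBoundVectorAnd(0, parents, lens, 2, items, 5, 3, true, sup);
With countSup == false (supList unused) the same call would instead store the
ANDed bitmaps at level 3 of d_midRes for the next depth to read. */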
extern "C"
double getCopyTime()
{
double timer = copyTime;
copyTime = 0.0;
return timer;
}
extern "C"
double getCountTime()
{
double timer = countTime;
countTime = 0.0;
return timer;
}
extern "C"
double getKernelTime()
{
double timer = kernelTime;
kernelTime = 0.0;
return timer;
}
#endif | 08b6a4b0623a4d91c23e3976e39ba237269ba03c.cu | #ifndef _GPU_MINER_KERNEL_CUH_
#define _GPU_MINER_KERNEL_CUH_
//#include "gpu_multiVectorAnd.cu"
#include "gpu_vectorAndInit.cuh"
#include "CUDA_header.cuh"
////////////////////////////////////// Global Variable /////////////////////////
double copyTime = 0.0;
double countTime = 0.0;
double kernelTime = 0.0;
static __device__ __constant__ table_t c_byteTable[TABLE_SIZE];//constant memory does not need cudaMalloc
int* d_id2List;
bytes_t* d_buf;
table_t* g_byteTable;
int* d_in1List;
int* d_in2List;
int* d_in2ListBound;
__global__
void multi_AND_wS_woC_kernel(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
bytes_t* d_out)
{
//1. load the d_in1List, d_in2List and d_in2ListBound to the shared memory
extern __shared__ int s_data[];
int* s_in1List = s_data; //len = in1ListLen
int* s_in2ListBound = s_data + in1ListLen; //len = in1ListLen
int* s_in2List = s_data + 2*in1ListLen; //len = in2ListLen
//note: this loop could probably avoid using threadIdx.x
for( int i = threadIdx.x; i < in1ListLen; i += blockDim.x )
{
s_in1List[i] = d_in1List[i];
s_in2ListBound[i] = d_in2ListBound[i];
}
for( int i = threadIdx.x; i < in2ListLen; i += blockDim.x )
{
//each entry of s_in2List stores an item id
s_in2List[i] = d_in2List[i];
}
__syncthreads();
//2.
int outOffset = 0; //for output, also for get the d_segIn2List
bytes_t* d_segIn1;//pointer to d_in1 for current segment
int* d_segIn2List; //pointer to d_in2List for current segment
int segIn2ListLen;
int segOutLen = 0;
const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int delta = blockDim.x*gridDim.x;
int idid2, offset, posInIn2;
for( int id1 = 0; id1 < in1ListLen; id1++ )
{
//d_segIn1 points to the segment of d_in1 currently being processed, i.e. the 0/1 transaction bits of the current parent node
d_segIn1 = d_in1 + segLen*(s_in1List[id1]);
//the next three lines set up the inner loop over the children whose parent is id1; the GPU processes s_in2ListBound[id1] (roughly 200-400) parent-child AND operations at once,
//each thread handling the AND of 16 bits of data
d_segIn2List = s_in2List + outOffset;
segIn2ListLen = s_in2ListBound[id1];
segOutLen = segLen*segIn2ListLen;//segIn2ListLen is a value below 1000
//for each segment, each thread handles the AND of 16 bits of data
//index ranges over 0-1536 and delta is 1536; segOutLen 16-bit words must be processed here, and because s_in2ListBound[id1] parent-child AND operations are computed at the same time the index handling is somewhat involved
for( int i = index; i < segOutLen; i += delta )
{
//idid2 indicates which child node this element belongs to
idid2 = i/segLen;
//offset is kept in sync between d_segIn1 and d_in2; offset ranges over 0-(segLen-1), which guarantees the two items being ANDed line up
offset = i - idid2*segLen;
//posInIn2 is the index of the child's 16-bit word under the current parent node; it must correspond to the matching parent, and d_segIn2List[idid2] records the item id of the child
//the same offset is used so that the parent and child words being ANDed stay aligned
posInIn2 = d_segIn2List[idid2]*segLen + offset;
//d_out holds the intermediate results; its capacity is 2*segLen*1500*15 (the 1500 should be related to the 1000 bound), where 15 is the maximum level value - see the levelPos array to understand why
//segOutLen < the capacity of d_out
//alternatively, d_out accesses could go through constant memory
//store the ANDed value into d_out; outOffset*segLen locates all children of the current parent node within the intermediate results
d_out[i + outOffset*segLen] = d_segIn1[offset]&d_in2[posInIn2];
}
outOffset += (s_in2ListBound[id1]);
}
}
__global__
void multi_AND_woS_wC_kernel(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
table_t* d_countOut )
{
//1. load the d_in1List, d_in2List and d_in2ListBound to the shared memory
extern __shared__ int s_data[];
int* s_in1List = s_data; //len = in1ListLen
int* s_in2ListBound = s_data + in1ListLen; //len = in1ListLen
int* s_in2List = s_data + 2*in1ListLen; //len = in2ListLen
for( int i = threadIdx.x; i < in1ListLen; i += blockDim.x )
{
s_in1List[i] = d_in1List[i];
s_in2ListBound[i] = d_in2ListBound[i];
}
for( int i = threadIdx.x; i < in2ListLen; i += blockDim.x )
{
s_in2List[i] = d_in2List[i];
}
__syncthreads();
//2.
int outOffset = 0; //for output, also for get the d_segIn2List
bytes_t* d_segIn1;//pointer to d_in1 for current segment
int* d_segIn2List; //pointer to d_in2List for current segment
int segIn2ListLen;
int segOutLen = 0;
const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int delta = blockDim.x*gridDim.x;
int idid2, offset, posInIn2;
bytes_t out;
for( int id1 = 0; id1 < in1ListLen; id1++ )
{
d_segIn1 = d_in1 + segLen*(s_in1List[id1]);
d_segIn2List = s_in2List + outOffset;
segIn2ListLen = s_in2ListBound[id1];
segOutLen = segLen*segIn2ListLen;
//for each segment
for( int i = index; i < segOutLen; i += delta )
{
idid2 = i/segLen;
//offset is kept in sync between d_segIn1 and d_in2; offset ranges over 0-(segLen-1), which guarantees the two items being ANDed line up
offset = i - idid2*segLen;
posInIn2 = d_segIn2List[idid2]*segLen + offset;
out = d_segIn1[offset]&d_in2[posInIn2];
//at the last level, evaluate the final result directly by counting how many 1 bits it contains
d_countOut[i + outOffset*segLen] = c_byteTable[out];
//d_out[i + outOffset*segLen] = d_segIn1[offset]&d_in2[posInIn2];
}
outOffset += (s_in2ListBound[id1]);
}
}
////////////////////////////////////////////////////////////////////////////////
//! GPU primitive for vector and operation for multiple d_in1 and multiple d_in2
//! @param d_in1
//! @param d_in1List list contains the id in d_in1
//! @param in1ListLen
//! @param d_in2
//! @param d_in2List list contains the id in d_in2
//! @param in2ListLen in2ListLen >= in1ListLen
//! @param d_in2ListBoud number of corresponding ids in d_in2 for each id in d_in1, len = in1ListLen
//! @param segLen size of each segment for AND operation
//! @param d_out, outLen = in2ListLen*segLen
////////////////////////////////////////////////////////////////////////////////
void multi_AND_wS_woC(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen,
const int* d_in2ListBound,
const int segLen,
bytes_t* d_out,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
//store the d_in1List, d_in2List and d_in2ListBound to the shared memory
unsigned int sharedSize = sizeof(int)*( 2*in1ListLen + in2ListLen );
multi_AND_wS_woC_kernel<<<numBlock, numThread, sharedSize>>>( d_in1, d_in1List, in1ListLen,
d_in2, d_in2List, in2ListLen, d_in2ListBound, segLen, d_out);
CUT_CHECK_ERROR( "multi_AND_wS_woC_kernel" );
SYNC();
}
void multi_AND_wS_woC_hostList(bytes_t* d_in1,
const int* h_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* h_in2List, const int in2ListLen,
const int* h_in2ListBound,
const int segLen,
bytes_t* d_out,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
unsigned int copyTimer = 0;
startTimer( ©Timer );
TOGPU( d_in1List, h_in1List, sizeof(int)*in1ListLen );
TOGPU( d_in2ListBound, h_in2ListBound, sizeof(int)*in1ListLen );
TOGPU( d_in2List, h_in2List, sizeof(int)*in2ListLen );
copyTime += endTimer( "", ©Timer );
unsigned int kernelTimer = 0;
startTimer( &kernelTimer );
multi_AND_wS_woC(d_in1, d_in1List,
in1ListLen,
d_in2,
d_in2List, in2ListLen,
d_in2ListBound,
segLen,
d_out);
kernelTime += endTimer( "", &kernelTimer );
}
//!!!!!
void multi_AND_woS_wC(bytes_t* d_in1,
const int* d_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* d_in2List, const int in2ListLen /**/,
const int* d_in2ListBound,
const int segLen,
table_t* d_countOut,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
//store the d_in1List, d_in2List and d_in2ListBound to the shared memory
unsigned int sharedSize = sizeof(int)*( 2*in1ListLen + in2ListLen );
multi_AND_woS_wC_kernel<<<numBlock, numThread, sharedSize>>>( d_in1, d_in1List, in1ListLen,
d_in2, d_in2List, in2ListLen, d_in2ListBound, segLen, d_countOut);
CUT_CHECK_ERROR( "multi_AND_wS_woC_kernel" );
SYNC();
}
void multi_AND_woS_wC_hostList(bytes_t* d_in1,
const int* h_in1List,
const int in1ListLen,
const bytes_t* d_in2,
const int* h_in2List, const int in2ListLen /**/,
const int* h_in2ListBound,
const int segLen,
table_t* d_countOut,
const unsigned int numBlock = 8,
const unsigned int numThread = 192
)
{
unsigned int copyTimer = 0;
startTimer( ©Timer );
TOGPU( d_in1List, h_in1List, sizeof(int)*in1ListLen );
TOGPU( d_in2ListBound, h_in2ListBound, sizeof(int)*in1ListLen );
TOGPU( d_in2List, h_in2List, sizeof(int)*in2ListLen );
copyTime += endTimer( "", ©Timer );
unsigned int kernelTimer = 0;
startTimer( &kernelTimer );
multi_AND_woS_wC(d_in1,
d_in1List,
in1ListLen,
d_in2,
d_in2List, in2ListLen /**/,
d_in2ListBound,
segLen,
d_countOut
);
kernelTime += endTimer( "", &kernelTimer );
}
bytes_t* d_bitmap;
bytes_t* d_midRes;
table_t* d_byteTable;
int itemSize;
int numTran;
int numTranInByte;
bytes_t* d_multiOut;
bytes_t* h_multiOut;
table_t* d_multiCountOut;
table_t* h_multiCountOut;
int* d_countBuf;
#define CPU_COUNT
//#define GPU_MINER_DEBUG
template<class T>
void copyFromGPU( T** h_out, T* d_in, unsigned int len )
{
CPUMALLOC( (void**)&(*h_out), sizeof(T)*len );
FROMGPU( (*h_out), d_in, sizeof(T)*len );
}
template<class T>
void copyToGPU( T** d_out, T* h_in, unsigned int len )
{
GPUMALLOC( (void**)&(*d_out), sizeof(T)*len );
TOGPU( (*d_out), h_in, sizeof(T)*len );
}
//////////////////////////////////////// The basic GPU functions //////////////////
extern "C"
void GPUInit( int argc, char** argv )
{
CUT_DEVICE_INIT( argc, argv );
}
extern "C"
void gpuMalloc( void** gpu_data, unsigned int sizeInByte )
{
GPUMALLOC( gpu_data, sizeInByte );
}
extern "C"
void copyCPUToGPU( void* cpu_data, void* gpu_data, unsigned int sizeInByte )
{
TOGPU( gpu_data, cpu_data, sizeInByte );
}
extern "C"
void copyCPUToGPUConstant( void* cpu_data, void* gpu_data, unsigned int sizeInByte )
{
CUDA_SAFE_CALL( cudaMemcpyToSymbol( gpu_data, cpu_data, sizeInByte ) );
}
extern "C"
void copyGPUToCPU( void* gpu_data, void* cpu_data, unsigned int sizeInByte )
{
FROMGPU( cpu_data, gpu_data, sizeInByte );
}
extern "C"
void GPUFree( void* gpu_data )
{
CUDA_SAFE_CALL( cudaFree( gpu_data ) );
}
/////////////////////////////////////////////// GPUMiner ////////////////////////
extern "C"
void GPUMiner_Free()
{
GPUFREE( d_bitmap );
GPUFREE( d_midRes );
GPUFREE( d_byteTable );
GPUFREE( d_multiCountOut );
#ifdef CPU_COUNT
CPUFREE( h_multiCountOut );
#endif
}
extern "C"
void arrayAndInit( const unsigned int maxListLen, const unsigned int numTranInByte )
{
//use constant memory to improve efficiency: h_byteTable is placed in GPU constant memory as a lookup table
CUDA_SAFE_CALL( cudaMemcpyToSymbol( c_byteTable, h_byteTable, sizeof(table_t)*TABLE_SIZE ) );
GPUMALLOC( (void**)&d_id2List, sizeof(int)*maxListLen ); //maxListLen is initially the number of frequent 1-itemsets
GPUMALLOC( (void**)&d_in1List, sizeof(int)*NUM_MAX_NODE_PER_CALL );
GPUMALLOC( (void**)&d_in2List, sizeof(int)*NUM_MAX_NODE_PER_CALL );
GPUMALLOC( (void**)&d_in2ListBound, sizeof(int)*NUM_MAX_NODE_PER_CALL );
#ifdef USE_GATHER_FOR_COUNT
GPUMALLOC( (void**)&g_byteTable, sizeof(table_t)*TABLE_SIZE );
TOGPU( g_byteTable, h_byteTable, sizeof(table_t)*TABLE_SIZE );
GPUMALLOC( (void**)&d_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
#endif
#ifdef USE_CONSTANT_BUF
CPUMALLOC( (void**)&hh_in, sizeof(bytes_t)*IN_SIZE );
#endif
#ifdef USE_COPY
GPUMALLOC( (void**)&d_buf, sizeof(bytes_t)*maxListLen*numTranInByte );
#endif
}
//initialize the GPU for mining
/*
* h_numTran: number of transactions
* h_itemSize: number of frequent 1-itemsets
*/
extern "C"
void initGPUMiner( bytes_t* h_matrix, const unsigned int h_itemSize, const unsigned int h_numTran )
{
numTran = h_numTran;
numTranInByte = (int)ceil((float)numTran/NUM_BIT_IN_BYTE);//NUM_BIT_IN_BYTE = 16, rounded up; why is a "byte" defined here as having 16 bits?
itemSize = h_itemSize;
unsigned int memTimer = 0;
startTimer( &memTimer );
//allocate GPU memory, sized roughly as (bytes per transaction) * (number of frequent 1-items)
GPUMALLOC( (void**)&d_bitmap, sizeof(bytes_t)*numTranInByte*itemSize );//multiply by sizeof(bytes_t) because of the extra division by 2 above, though it is not clear why it is done this way
//copy h_matrix from the CPU into d_bitmap
TOGPU( d_bitmap, h_matrix, sizeof(bytes_t)*numTranInByte*itemSize);
//allocate GPU memory for the intermediate results, midRes
//GPUMALLOC( (void**)&d_midRes, sizeof(bytes_t)*numTranInByte*itemSize*NUM_MAX_LEVEL );
//GPUInit<bytes_t>( d_midRes, numTranInByte*itemSize, 0 );
GPUMALLOC( (void**)&d_midRes, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL );
CUDA_SAFE_CALL( cudaMemset(d_midRes, 0, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL) );
//initialize for the andVector (prepare for the AND operations)
arrayAndInit( itemSize, numTranInByte);
GPUMALLOC( (void**)&d_multiCountOut, sizeof(table_t)*numTranInByte*NUM_MAX_NODE_PER_CALL ); ///////////////!!!!!!! can be deleted
#ifdef CPU_COUNT
GPUMALLOC( (void**)&d_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
CPUMALLOC( (void**)&h_multiOut, sizeof(bytes_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
CPUMALLOC( (void**)&h_multiCountOut, sizeof(table_t)*numTranInByte*NUM_MAX_NODE_PER_CALL );
#else
GPUMALLOC( (void**)&d_countBuf, sizeof(int)*NUM_MAX_NODE_PER_CALL );
#endif
copyTime = endTimer("", &memTimer);
}
int levelPos[NUM_MAX_LEVEL] = {0};
//update the level size
inline void updateLevelSize( int levelIdx, const int levelSize )
{
levelPos[levelIdx] = levelPos[levelIdx - 1] + levelSize;
}
//get the starting address of offset in levelIdx
//i.e. the offset of entry 'offset' from the start address of level levelIdx
inline int getLevelPos( const int levelIdx, const int offset )
{
return levelPos[levelIdx] + offset*numTranInByte;
}
//left: int midRltBeginPos, int *parentList
//if midRltBeginPos = -1, parentList->matrix
//if midRltBeginPos != -1, midRest's offset, last level
//right:
//itemIdList: matrix
//int *itemLenList, int pairNum
//midRltStoreLevel: if no counting, store
//|paraentList| and |itemLenList| = pairNum
//|itemIdList| = itemIdListLen
extern "C"
void gpuBoundVectorAnd( const int midRltBeginPos, const int *parentList, const int *itemLenList, const int pairNum,
const int *itemIdList, const int itemIdListLen, const int midRltStoreLevel,
const bool countSup, int *supList)
{
bytes_t* d_in1;
bytes_t* d_in2;
if( midRltBeginPos == -1 )
//if(!countSup)
{
d_in1 = d_bitmap; //from the bitmap
}
else
{
//need to start from the intermediate results
//e.g. when level=3, d_in1 = d_midRes + getLevelPos(1,0) (max_level=15); d_in1 points to the start of the intermediate results
/*
inline int getLevelPos( const int levelIdx, const int offset )
{
return levelPos[levelIdx] + offset*numTranInByte;
}
*/
//
d_in1 = d_midRes + getLevelPos( midRltStoreLevel - 2, midRltBeginPos );//from the midRes
}
d_in2 = d_bitmap;
#ifdef GPU_MINER_DEBUG
bytes_t* h_in1;
bytes_t* h_in2;
copyFromGPU<bytes_t>( &h_in1, d_in1, numTranInByte*pairNum );
copyFromGPU<bytes_t>( &h_in2, d_in2, numTranInByte*itemSize );
int a = 1;
#endif
if( countSup )//no store, do counting
{
//unsigned int kernelTimer = 0;
//startTimer( &kernelTimer );
multi_AND_woS_wC_hostList(d_in1, parentList, pairNum,
d_in2,
itemIdList, itemIdListLen,
itemLenList,
numTranInByte,
d_multiCountOut );
//kernelTime += endTimer( "", &kernelTimer );
#ifdef CPU_COUNT
unsigned int copyTimer = 0;
startTimer( ©Timer );
//copy d_multiCountOut from the GPU to the CPU
FROMGPU( h_multiCountOut, d_multiCountOut, sizeof(table_t)*numTranInByte*itemIdListLen );
copyTime += endTimer("", ©Timer);
unsigned int countTimer = 0;
startTimer( &countTimer );
for( int i = 0; i < itemIdListLen; i++ )
{
int sum = 0;
for( int j = 0; j < numTranInByte; j++ )
{
sum += h_multiCountOut[i*numTranInByte + j];
}
supList[i] = sum;
}
countTime += endTimer( "", &countTimer );
#else
unsigned int countTimer = 0;
startTimer( &countTimer );
fixlenSum_v2<table_t, int>( d_multiCountOut, d_countBuf, numTranInByte, itemIdListLen, supList );
countTime += endTimer( "", &countTimer );
#endif
}
else //store, no counting
{
//when midRltStoreLevel = 2
bytes_t* d_out = d_midRes + levelPos[midRltStoreLevel - 1];
multi_AND_wS_woC_hostList( d_in1, parentList, pairNum,
d_in2,
itemIdList, itemIdListLen,
itemLenList,
numTranInByte,
d_out
);
//the intermediate results occupy itemIdListLen*numTranInByte 16-bit words
/*
inline void updateLevelSize( int levelIdx, const int levelSize )
{
levelPos[levelIdx] = levelPos[levelIdx - 1] + levelSize;
}
*/
//updateLevelSize updates levelPos at the current depth: the previous depth's value plus itemIdListLen*numTranInByte
updateLevelSize( midRltStoreLevel, itemIdListLen*numTranInByte );//this records where the next level's intermediate results start
}
#ifdef GPU_MINER_DEBUG
bytes_t* h_midRes;
copyFromGPU<bytes_t>( &h_midRes, d_midRes, numTranInByte*NUM_MAX_NODE_PER_CALL*NUM_MAX_LEVEL );
a = 1;
#endif
}
extern "C"
double getCopyTime()
{
double timer = copyTime;
copyTime = 0.0;
return timer;
}
extern "C"
double getCountTime()
{
double timer = countTime;
countTime = 0.0;
return timer;
}
extern "C"
double getKernelTime()
{
double timer = kernelTime;
kernelTime = 0.0;
return timer;
}
#endif |
808be6a88f598c3476b6acf4c1a3a65179faa1e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
using namespace std;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
hipFuncAttributes attrs;
cudaSafeCall( hipFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are another siblings, can goto there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
}
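/* Editor's note - worked example, not in the original source. OctreeIteratorHost
packs, for each level, the index of the current node and the number of siblings
still to visit into one int as (first << 8) + len; the root level starts as
(0 << 8) + 1. gotoNextLevel(12, 3) pushes (12 << 8) + 3, operator*() then yields
12, and operator++() adds (1 << 8) - 1 to give (13 << 8) + 2, i.e. it advances
to the next sibling while decrementing the remaining count, popping levels whose
count has dropped to 1. */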
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (size_t)max_nn)
return;
++iterator;
continue;
}
// test children
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
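// Host approximate nearest-neighbour search: if the query lies inside the root
// bounds, descend along its Morton code as far as existing children allow, then
// linearly scan only that node's points and map the winner back through indices.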
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
| 808be6a88f598c3476b6acf4c1a3a65179faa1e7.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
using namespace std;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
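// Reports the SASS (binaryVersion) and PTX (ptxVersion) targets this translation
// unit was compiled for, by querying the attributes of the trivial get_cc_kernel.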
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
cudaFuncAttributes attrs;
cudaSafeCall( cudaFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are another siblings, can goto there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
}
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (size_t)max_nn)
return;
++iterator;
continue;
}
// test children
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
|
6781406995bb2b16ad38749d67556686a3abab3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <rocblas.h>
#define cudacall(call) \
do \
{ \
hipError_t err = (call); \
if(hipSuccess != err) \
{ \
fprintf(stderr,"CUDA Error:\nFile = %s\nLine = %d\nReason = %s\n", __FILE__, __LINE__, hipGetErrorString(err)); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
} \
while (0)
#define cublascall(call) \
do \
{ \
hipblasStatus_t status = (call); \
if(HIPBLAS_STATUS_SUCCESS != status) \
{ \
fprintf(stderr,"CUBLAS Error:\nFile = %s\nLine = %d\nCode = %d\n", __FILE__, __LINE__, status); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
\
} \
while(0)
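// Builds the batched array-of-pointers view on the device: entry gid of Ashp is
// made to point at the gid-th n*n matrix inside the flat buffer Aflat.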
__global__ void
writeShapeKernel(float** Ashp, float* Aflat, int n, int M) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < M) {
Ashp[gid] = Aflat + gid*n*n;
}
}
static int64_t get_wall_time(void) {
struct timeval time;
gettimeofday(&time,NULL);
return time.tv_sec * 1000000 + time.tv_usec;
}
void invert0(float** src, float** dst, int n, int batchSize)
{
hipblasHandle_t handle;
cublascall(hipblasCreate(&handle));
int *P, *INFO;
cudacall(hipMalloc(&P, n * batchSize * sizeof(int)));
cudacall(hipMalloc(&INFO, batchSize * sizeof(int)));
int lda = n;
float **A = (float **)malloc(batchSize*sizeof(float *));
float **A_d, *A_dflat;
cudacall(hipMalloc(&A_d,batchSize*sizeof(float *)));
cudacall(hipMalloc(&A_dflat, n*n*batchSize*sizeof(float)));
A[0] = A_dflat;
for (int i = 1; i < batchSize; i++)
A[i] = A[i-1]+(n*n);
cudacall(hipMemcpy(A_d,A,batchSize*sizeof(float *),hipMemcpyHostToDevice));
for (int i = 0; i < batchSize; i++)
cudacall(hipMemcpy(A_dflat+(i*n*n), src[i], n*n*sizeof(float), hipMemcpyHostToDevice));
cublascall(hipblasSgetrfBatched(handle,n,A_d,lda,P,INFO,batchSize));
int INFOh[batchSize];
cudacall(hipMemcpy(INFOh,INFO,batchSize*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
float **C = (float **)malloc(batchSize*sizeof(float *));
float **C_d, *C_dflat;
cudacall(hipMalloc(&C_d,batchSize*sizeof(float *)));
cudacall(hipMalloc(&C_dflat, n*n*batchSize*sizeof(float)));
C[0] = C_dflat;
for (int i = 1; i < batchSize; i++)
C[i] = C[i-1] + (n*n);
cudacall(hipMemcpy(C_d,C,batchSize*sizeof(float *),hipMemcpyHostToDevice));
cublascall(hipblasSgetriBatched(handle,n,(const float **)A_d,lda,P,C_d,lda,INFO,batchSize));
cudacall(hipMemcpy(INFOh,INFO,batchSize*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
for (int i = 0; i < batchSize; i++)
cudacall(hipMemcpy(dst[i], C_dflat + (i*n*n), n*n*sizeof(float), hipMemcpyDeviceToHost));
hipFree(A_d); hipFree(A_dflat); free(A);
hipFree(C_d); hipFree(C_dflat); free(C);
hipFree(P); hipFree(INFO); hipblasDestroy(handle);
}
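// Same batched inversion as invert0, but the per-matrix pointer tables are
// written on the device by writeShapeKernel and the getrf/getri region is timed
// with get_wall_time; the intermediate factorization INFO check is compiled out.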
void invert(float** src, float** dst, int n, int batchSize)
{
hipblasHandle_t handle;
cublascall(hipblasCreate(&handle));
int *P, *INFO;
int lda = n;
float **A = (float **)malloc(batchSize*sizeof(float *));
float **A_d, *A_dflat;
cudacall(hipMalloc(&A_d,batchSize*sizeof(float *)));
cudacall(hipMalloc(&A_dflat, n*n*batchSize*sizeof(float)));
A[0] = A_dflat;
for (int i = 1; i < batchSize; i++)
A[i] = A[i-1]+(n*n);
cudacall(hipMemcpy(A_d,A,batchSize*sizeof(float *),hipMemcpyHostToDevice));
for (int i = 0; i < batchSize; i++)
cudacall(hipMemcpy(A_dflat+(i*n*n), src[i], n*n*sizeof(float), hipMemcpyHostToDevice));
// for second call
float **C = (float **)malloc(batchSize*sizeof(float *));
float **C_d, *C_dflat;
cudacall(hipMalloc(&C_d,batchSize*sizeof(float *)));
cudacall(hipMalloc(&C_dflat, n*n*batchSize*sizeof(float)));
C[0] = C_dflat;
for (int i = 1; i < batchSize; i++)
C[i] = C[i-1] + (n*n);
cudacall(hipMemcpy(C_d,C,batchSize*sizeof(float *),hipMemcpyHostToDevice));
int INFOh[batchSize];
{
hipDeviceSynchronize();
int64_t elapsed, aft, bef = get_wall_time();
cudacall(hipMalloc(&P, n * batchSize * sizeof(int)));
cudacall(hipMalloc(&INFO, batchSize * sizeof(int)));
const unsigned int block_size = 256;
const unsigned int num_blocks = (batchSize + block_size - 1) / block_size;
hipLaunchKernelGGL(( writeShapeKernel), dim3(num_blocks),dim3(block_size), 0, 0, A_d, A_dflat, n, batchSize);
cublascall(hipblasSgetrfBatched(handle,n,A_d,lda,P,INFO,batchSize));
#if 0
cudacall(hipMemcpy(INFOh,INFO,batchSize*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++) {
if(INFOh[i] != 0)
{
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
#endif
hipLaunchKernelGGL(( writeShapeKernel), dim3(num_blocks),dim3(block_size), 0, 0, C_d, C_dflat, n, batchSize);
cublascall(hipblasSgetriBatched(handle,n,(const float **)A_d,lda,P,C_d,lda,INFO,batchSize));
hipDeviceSynchronize();
aft = get_wall_time();
elapsed = aft - bef;
printf("%lds\n", elapsed);
}
cudacall(hipMemcpy(INFOh,INFO,batchSize*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
for (int i = 0; i < batchSize; i++)
cudacall(hipMemcpy(dst[i], C_dflat + (i*n*n), n*n*sizeof(float), hipMemcpyDeviceToHost));
hipFree(A_d); hipFree(A_dflat); free(A);
hipFree(C_d); hipFree(C_dflat); free(C);
hipFree(P); hipFree(INFO); hipblasDestroy(handle);
}
float** mkRandData(int K, int M) {
float **inputs = (float **)malloc(M*sizeof(float *));
for(int i=0; i<M; i++) {
float* mat = (float*)malloc(K*K*sizeof(float));
for(int k=0; k<K*K; k++) {
mat[k] = (rand() / (float)RAND_MAX) * 1000.0;
}
inputs[i] = mat;
}
return inputs;
}
void test_invert(const int mybatch, const int n)
{
#if 0
const int n = 8; //3; //8;
const int mybatch = 111556; //16384*4; //4; //16384;
//Random matrix with full pivots
float full_pivot[n*n] = { 0.5, 3, 4,
1, 3, 10,
4 , 9, 16 };
//Almost same as above matrix with first pivot zero
float zero_pivot[n*n] = { 0, 3, 4,
1, 3, 10,
4 , 9, 16 };
float another_zero_pivot[n*n] = { 0, 3, 4,
1, 5, 6,
9, 8, 2 };
float another_full_pivot[n * n] = { 22, 3, 4,
1, 5, 6,
9, 8, 2 };
float **inputs = (float **)malloc(mybatch*sizeof(float *));
inputs[0] = zero_pivot;
inputs[1] = full_pivot;
inputs[2] = another_zero_pivot;
inputs[3] = another_full_pivot;
#else
float** inputs = mkRandData(n, mybatch);
#endif
float *result_flat = (float *)malloc(mybatch*n*n*sizeof(float));
float **results = (float **)malloc(mybatch*sizeof(float *));
for (int i = 0; i < mybatch; i++)
results[i] = result_flat + (i*n*n);
#if 0
for (int qq = 0; qq < mybatch; qq++){
fprintf(stdout, "Input %d:\n\n", qq);
for(int i=0; i<n; i++)
{
for(int j=0; j<n; j++)
fprintf(stdout,"%f\t",inputs[qq][i*n+j]);
fprintf(stdout,"\n");
}
}
fprintf(stdout,"\n\n");
#endif
invert(inputs, results, n, mybatch);
#if 0
for (int qq = 0; qq < mybatch; qq++){
fprintf(stdout, "Inverse %d:\n\n", qq);
for(int i=0; i<n; i++)
{
for(int j=0; j<n; j++)
fprintf(stdout,"%f\t",results[qq][i*n+j]);
fprintf(stdout,"\n");
}
}
#endif
}
int main(int argc, char** argv)
{
if(argc != 3) {
printf("Mat-Inv expects 2 arguments:\n");
printf("(1) the size of the batch\n");
printf("(2) the dimension K of the KxK matrix\n");
exit(0);
}
int32_t M = atoi(argv[1]);
int32_t K = atoi(argv[2]);
test_invert(M, K);
return 0;
}
| 6781406995bb2b16ad38749d67556686a3abab3e.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cublas_v2.h>
#define cudacall(call) \
do \
{ \
cudaError_t err = (call); \
if(cudaSuccess != err) \
{ \
fprintf(stderr,"CUDA Error:\nFile = %s\nLine = %d\nReason = %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
} \
while (0)
#define cublascall(call) \
do \
{ \
cublasStatus_t status = (call); \
if(CUBLAS_STATUS_SUCCESS != status) \
{ \
fprintf(stderr,"CUBLAS Error:\nFile = %s\nLine = %d\nCode = %d\n", __FILE__, __LINE__, status); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
\
} \
while(0)
__global__ void
writeShapeKernel(float** Ashp, float* Aflat, int n, int M) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < M) {
Ashp[gid] = Aflat + gid*n*n;
}
}
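// Wall-clock timestamp in microseconds (seconds * 1e6 + microseconds).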
static int64_t get_wall_time(void) {
struct timeval time;
gettimeofday(&time,NULL);
return time.tv_sec * 1000000 + time.tv_usec;
}
void invert0(float** src, float** dst, int n, int batchSize)
{
cublasHandle_t handle;
cublascall(cublasCreate_v2(&handle));
int *P, *INFO;
cudacall(cudaMalloc(&P, n * batchSize * sizeof(int)));
cudacall(cudaMalloc(&INFO, batchSize * sizeof(int)));
int lda = n;
float **A = (float **)malloc(batchSize*sizeof(float *));
float **A_d, *A_dflat;
cudacall(cudaMalloc(&A_d,batchSize*sizeof(float *)));
cudacall(cudaMalloc(&A_dflat, n*n*batchSize*sizeof(float)));
A[0] = A_dflat;
for (int i = 1; i < batchSize; i++)
A[i] = A[i-1]+(n*n);
cudacall(cudaMemcpy(A_d,A,batchSize*sizeof(float *),cudaMemcpyHostToDevice));
for (int i = 0; i < batchSize; i++)
cudacall(cudaMemcpy(A_dflat+(i*n*n), src[i], n*n*sizeof(float), cudaMemcpyHostToDevice));
cublascall(cublasSgetrfBatched(handle,n,A_d,lda,P,INFO,batchSize));
int INFOh[batchSize];
cudacall(cudaMemcpy(INFOh,INFO,batchSize*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
float **C = (float **)malloc(batchSize*sizeof(float *));
float **C_d, *C_dflat;
cudacall(cudaMalloc(&C_d,batchSize*sizeof(float *)));
cudacall(cudaMalloc(&C_dflat, n*n*batchSize*sizeof(float)));
C[0] = C_dflat;
for (int i = 1; i < batchSize; i++)
C[i] = C[i-1] + (n*n);
cudacall(cudaMemcpy(C_d,C,batchSize*sizeof(float *),cudaMemcpyHostToDevice));
cublascall(cublasSgetriBatched(handle,n,(const float **)A_d,lda,P,C_d,lda,INFO,batchSize));
cudacall(cudaMemcpy(INFOh,INFO,batchSize*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
for (int i = 0; i < batchSize; i++)
cudacall(cudaMemcpy(dst[i], C_dflat + (i*n*n), n*n*sizeof(float), cudaMemcpyDeviceToHost));
cudaFree(A_d); cudaFree(A_dflat); free(A);
cudaFree(C_d); cudaFree(C_dflat); free(C);
cudaFree(P); cudaFree(INFO); cublasDestroy_v2(handle);
}
void invert(float** src, float** dst, int n, int batchSize)
{
cublasHandle_t handle;
cublascall(cublasCreate_v2(&handle));
int *P, *INFO;
int lda = n;
float **A = (float **)malloc(batchSize*sizeof(float *));
float **A_d, *A_dflat;
cudacall(cudaMalloc(&A_d,batchSize*sizeof(float *)));
cudacall(cudaMalloc(&A_dflat, n*n*batchSize*sizeof(float)));
A[0] = A_dflat;
for (int i = 1; i < batchSize; i++)
A[i] = A[i-1]+(n*n);
cudacall(cudaMemcpy(A_d,A,batchSize*sizeof(float *),cudaMemcpyHostToDevice));
for (int i = 0; i < batchSize; i++)
cudacall(cudaMemcpy(A_dflat+(i*n*n), src[i], n*n*sizeof(float), cudaMemcpyHostToDevice));
// for second call
float **C = (float **)malloc(batchSize*sizeof(float *));
float **C_d, *C_dflat;
cudacall(cudaMalloc(&C_d,batchSize*sizeof(float *)));
cudacall(cudaMalloc(&C_dflat, n*n*batchSize*sizeof(float)));
C[0] = C_dflat;
for (int i = 1; i < batchSize; i++)
C[i] = C[i-1] + (n*n);
cudacall(cudaMemcpy(C_d,C,batchSize*sizeof(float *),cudaMemcpyHostToDevice));
int INFOh[batchSize];
{
cudaDeviceSynchronize();
int64_t elapsed, aft, bef = get_wall_time();
cudacall(cudaMalloc(&P, n * batchSize * sizeof(int)));
cudacall(cudaMalloc(&INFO, batchSize * sizeof(int)));
const unsigned int block_size = 256;
const unsigned int num_blocks = (batchSize + block_size - 1) / block_size;
writeShapeKernel<<<num_blocks,block_size>>>(A_d, A_dflat, n, batchSize);
cublascall(cublasSgetrfBatched(handle,n,A_d,lda,P,INFO,batchSize));
#if 0
cudacall(cudaMemcpy(INFOh,INFO,batchSize*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++) {
if(INFOh[i] != 0)
{
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
#endif
writeShapeKernel<<<num_blocks,block_size>>>(C_d, C_dflat, n, batchSize);
cublascall(cublasSgetriBatched(handle,n,(const float **)A_d,lda,P,C_d,lda,INFO,batchSize));
cudaDeviceSynchronize();
aft = get_wall_time();
elapsed = aft - bef;
printf("%ldμs\n", elapsed);
}
cudacall(cudaMemcpy(INFOh,INFO,batchSize*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < batchSize; i++)
if(INFOh[i] != 0)
{
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
for (int i = 0; i < batchSize; i++)
cudacall(cudaMemcpy(dst[i], C_dflat + (i*n*n), n*n*sizeof(float), cudaMemcpyDeviceToHost));
cudaFree(A_d); cudaFree(A_dflat); free(A);
cudaFree(C_d); cudaFree(C_dflat); free(C);
cudaFree(P); cudaFree(INFO); cublasDestroy_v2(handle);
}
float** mkRandData(int K, int M) {
float **inputs = (float **)malloc(M*sizeof(float *));
for(int i=0; i<M; i++) {
float* mat = (float*)malloc(K*K*sizeof(float));
for(int k=0; k<K*K; k++) {
mat[k] = (rand() / (float)RAND_MAX) * 1000.0;
}
inputs[i] = mat;
}
return inputs;
}
void test_invert(const int mybatch, const int n)
{
#if 0
const int n = 8; //3; //8;
const int mybatch = 111556; //16384*4; //4; //16384;
//Random matrix with full pivots
float full_pivot[n*n] = { 0.5, 3, 4,
1, 3, 10,
4 , 9, 16 };
//Almost same as above matrix with first pivot zero
float zero_pivot[n*n] = { 0, 3, 4,
1, 3, 10,
4 , 9, 16 };
float another_zero_pivot[n*n] = { 0, 3, 4,
1, 5, 6,
9, 8, 2 };
float another_full_pivot[n * n] = { 22, 3, 4,
1, 5, 6,
9, 8, 2 };
float **inputs = (float **)malloc(mybatch*sizeof(float *));
inputs[0] = zero_pivot;
inputs[1] = full_pivot;
inputs[2] = another_zero_pivot;
inputs[3] = another_full_pivot;
#else
float** inputs = mkRandData(n, mybatch);
#endif
float *result_flat = (float *)malloc(mybatch*n*n*sizeof(float));
float **results = (float **)malloc(mybatch*sizeof(float *));
for (int i = 0; i < mybatch; i++)
results[i] = result_flat + (i*n*n);
#if 0
for (int qq = 0; qq < mybatch; qq++){
fprintf(stdout, "Input %d:\n\n", qq);
for(int i=0; i<n; i++)
{
for(int j=0; j<n; j++)
fprintf(stdout,"%f\t",inputs[qq][i*n+j]);
fprintf(stdout,"\n");
}
}
fprintf(stdout,"\n\n");
#endif
invert(inputs, results, n, mybatch);
#if 0
for (int qq = 0; qq < mybatch; qq++){
fprintf(stdout, "Inverse %d:\n\n", qq);
for(int i=0; i<n; i++)
{
for(int j=0; j<n; j++)
fprintf(stdout,"%f\t",results[qq][i*n+j]);
fprintf(stdout,"\n");
}
}
#endif
}
int main(int argc, char** argv)
{
if(argc != 3) {
printf("Mat-Inv expects 2 arguments:\n");
printf("(1) the size of the batch\n");
printf("(2) the dimension K of the KxK matrix\n");
exit(0);
}
int32_t M = atoi(argv[1]);
int32_t K = atoi(argv[2]);
test_invert(M, K);
return 0;
}
|
3ca9e9d2dce12b559c49f96ef9a7bbfabdff5432.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by qinbin on 2018/5/9.
//
#include "thundergbm/hist_cut.h"
#include "thundergbm/quantile_sketch.h"
#include "thundergbm/syncarray.h"
#include <sstream>
#include <omp.h>
#include <thundergbm/hist_cut.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/unique.h"
void HistCut::get_cut_points(SparseColumns &columns, InsStat &stats, int max_num_bins, int n_instances) {
LOG(TRACE) << "get cut points";
LOG(DEBUG) << "val = " << columns.csc_val;
LOG(DEBUG) << "idx = " << columns.csc_row_idx;
LOG(DEBUG) << "ptr = " << columns.csc_col_ptr;
int n_features = columns.n_column;
// std::cout<<"n_featrues:"<<n_features<<std::endl;
vector<quanSketch> sketchs(n_features);
//kFactor times more cut point candidates are considered when building the summary.
const int kFactor = 8;
for (int i = 0; i < n_features; i++) {
sketchs[i].Init(n_instances, 1.0 / (max_num_bins * kFactor));
}
float_type *val_ptr = columns.csc_val.host_data();
int *row_ptr = columns.csc_row_idx.host_data();
int *col_ptr = columns.csc_col_ptr.host_data();
auto stat_gh_ptr = stats.gh_pair.host_data();
// std::cout<<"before add"<<std::endl;
#pragma omp parallel for
for (int i = 0; i < columns.csc_col_ptr.size() - 1; i++) {
for (int j = col_ptr[i + 1] - 1; j >= col_ptr[i]; j--) {
float_type val = val_ptr[j];
float_type weight = stat_gh_ptr[row_ptr[j]].h;
sketchs[i].Add(val, weight);
}
}
// std::cout<<"after add"<<std::endl;
vector<summary> n_summary(n_features);
// summary n_summary[n_features];
// std::cout<<"before prune"<<std::endl;
#pragma omp parallel for
for (int i = 0; i < n_features; i++) {
summary ts;
sketchs[i].GetSummary(ts);
n_summary[i].Reserve(max_num_bins * kFactor);
n_summary[i].Prune(ts, max_num_bins * kFactor);
}
int nthread = omp_get_max_threads();
// LOG(DEBUG)<<"nthread = " << nthread;
vector<vector<float_type>> cut_points_local;
cut_points_local.resize(n_features);
vector<int> cut_points_size(n_features);
for (int i = 0; i < n_features; i++)
cut_points_local[i].resize(max_num_bins);
#pragma omp parallel num_threads(nthread)
{
int tid = omp_get_thread_num();
// LOG(DEBUG)<<"tid = "<< tid;
int nstep = (n_features + nthread - 1) / nthread;
int sbegin = ::min(tid * nstep, n_features);
int send = ::min((tid + 1) * nstep, n_features);
for (int i = sbegin; i < send; i++) {
int k = 0;
summary ts;
ts.Reserve(max_num_bins);
ts.Prune(n_summary[i], max_num_bins);
if (ts.entry_size == 0) {
cut_points_size[i] = 0;
continue;
}
float_type min_val = ts.entries[0].val;
cut_points_local[i][k++] = min_val - (fabsf(min_val) + 1e-5);
if (ts.entry_size > 1 && ts.entry_size <= 16) {
cut_points_local[i][k++] = (ts.entries[0].val + ts.entries[1].val) / 2;
for (int j = 2; j < ts.entry_size; j++) {
float_type mid = (ts.entries[j - 1].val + ts.entries[j].val) / 2;
if (mid > cut_points_local[i][k - 1]) {
cut_points_local[i][k++] = mid;
}
}
} else {
if (ts.entry_size > 1)
cut_points_local[i][k++] = ts.entries[1].val;
for (int j = 2; j < ts.entry_size; j++) {
float_type val = ts.entries[j].val;
if (val > cut_points_local[i][k - 1]) {
cut_points_local[i][k++] = val;
}
}
}
/*
float_type max_val = ts.entries[ts.entry_size - 1].val;
if(max_val > 0){
cut_points_local[i][k++] = max_val*2 + 1e-5;
}
else{
cut_points_local[i][k++] = 1e-5;
}
*/
cut_points_size[i] = k;
}
}
for (int i = 0; i < n_features; i++) {
if (cut_points_size[i] != 0)
this->cut_points.insert(cut_points.end(), cut_points_local[i].begin(),
cut_points_local[i].begin() + cut_points_size[i]);
}
this->row_ptr.push_back(0);
for (int i = 0; i < n_features; i++) {
this->row_ptr.push_back(cut_points_size[i] + this->row_ptr.back());
}
cut_row_ptr.resize(this->row_ptr.size());
cut_row_ptr.copy_from(this->row_ptr.data(), this->row_ptr.size());
cut_points_val.resize(this->cut_points.size());
auto cut_points_val_ptr = cut_points_val.host_data();
auto cut_row_ptr_data = cut_row_ptr.host_data();
for (int i = 0; i < cut_row_ptr.size(); i++) {
int sum = cut_row_ptr_data[i] + cut_row_ptr_data[i + 1] - 1;
for (int j = cut_row_ptr_data[i + 1] - 1; j >= cut_row_ptr_data[i]; j--)
cut_points_val_ptr[j] = this->cut_points[sum - j];
}
LOG(DEBUG) << cut_row_ptr;
LOG(DEBUG) << cut_fid.size();
cut_fid.resize(cut_points.size());
auto cut_fid_data = cut_fid.device_data();
device_loop_2d(n_features, cut_row_ptr.device_data(), [=] __device__(int fid, int i) {
cut_fid_data[i] = fid;
});
}
void HistCut::get_cut_points2(SparseColumns &columns, int max_num_bins, int n_instances) {
int n_column = columns.n_column;
auto csc_val_data = columns.csc_val.host_data();
auto csc_col_ptr_data = columns.csc_col_ptr.host_data();
cut_points.clear();
row_ptr.clear();
row_ptr.resize(1, 0);
//TODO do this on GPU
for (int fid = 0; fid < n_column; ++fid) {
int col_start = csc_col_ptr_data[fid];
int col_len = csc_col_ptr_data[fid + 1] - col_start;
auto val_data = csc_val_data + col_start;
vector<float_type> unique_val(col_len);
int unique_len = thrust::unique_copy(thrust::host, val_data, val_data + col_len, unique_val.data()) - unique_val.data();
if (unique_len <= max_num_bins) {
row_ptr.push_back(unique_len + row_ptr.back());
for (int i = 0; i < unique_len; ++i) {
cut_points.push_back(unique_val[i]);
}
} else {
row_ptr.push_back(max_num_bins + row_ptr.back());
for (int i = 0; i < max_num_bins; ++i) {
cut_points.push_back(unique_val[unique_len / max_num_bins * i]);
}
}
}
cut_points_val.resize(cut_points.size());
cut_points_val.copy_from(cut_points.data(), cut_points.size());
cut_row_ptr.resize(row_ptr.size());
cut_row_ptr.copy_from(row_ptr.data(), row_ptr.size());
cut_fid.resize(cut_points.size());
auto cut_fid_data = cut_fid.device_data();
device_loop_2d(n_column, cut_row_ptr.device_data(), [=] __device__(int fid, int i) {
cut_fid_data[i] = fid;
});
}
| 3ca9e9d2dce12b559c49f96ef9a7bbfabdff5432.cu | //
// Created by qinbin on 2018/5/9.
//
#include "thundergbm/hist_cut.h"
#include "thundergbm/quantile_sketch.h"
#include "thundergbm/syncarray.h"
#include <sstream>
#include <omp.h>
#include <thundergbm/hist_cut.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/unique.h"
void HistCut::get_cut_points(SparseColumns &columns, InsStat &stats, int max_num_bins, int n_instances) {
LOG(TRACE) << "get cut points";
LOG(DEBUG) << "val = " << columns.csc_val;
LOG(DEBUG) << "idx = " << columns.csc_row_idx;
LOG(DEBUG) << "ptr = " << columns.csc_col_ptr;
int n_features = columns.n_column;
// std::cout<<"n_featrues:"<<n_features<<std::endl;
vector<quanSketch> sketchs(n_features);
//kFactor times more cut point candidates are considered when building the summary.
const int kFactor = 8;
for (int i = 0; i < n_features; i++) {
sketchs[i].Init(n_instances, 1.0 / (max_num_bins * kFactor));
}
float_type *val_ptr = columns.csc_val.host_data();
int *row_ptr = columns.csc_row_idx.host_data();
int *col_ptr = columns.csc_col_ptr.host_data();
auto stat_gh_ptr = stats.gh_pair.host_data();
// std::cout<<"before add"<<std::endl;
#pragma omp parallel for
for (int i = 0; i < columns.csc_col_ptr.size() - 1; i++) {
for (int j = col_ptr[i + 1] - 1; j >= col_ptr[i]; j--) {
float_type val = val_ptr[j];
float_type weight = stat_gh_ptr[row_ptr[j]].h;
sketchs[i].Add(val, weight);
}
}
// std::cout<<"after add"<<std::endl;
vector<summary> n_summary(n_features);
// summary n_summary[n_features];
// std::cout<<"before prune"<<std::endl;
#pragma omp parallel for
for (int i = 0; i < n_features; i++) {
summary ts;
sketchs[i].GetSummary(ts);
n_summary[i].Reserve(max_num_bins * kFactor);
n_summary[i].Prune(ts, max_num_bins * kFactor);
}
int nthread = omp_get_max_threads();
// LOG(DEBUG)<<"nthread = " << nthread;
vector<vector<float_type>> cut_points_local;
cut_points_local.resize(n_features);
vector<int> cut_points_size(n_features);
for (int i = 0; i < n_features; i++)
cut_points_local[i].resize(max_num_bins);
#pragma omp parallel num_threads(nthread)
{
int tid = omp_get_thread_num();
// LOG(DEBUG)<<"tid = "<< tid;
int nstep = (n_features + nthread - 1) / nthread;
int sbegin = std::min(tid * nstep, n_features);
int send = std::min((tid + 1) * nstep, n_features);
for (int i = sbegin; i < send; i++) {
int k = 0;
summary ts;
ts.Reserve(max_num_bins);
ts.Prune(n_summary[i], max_num_bins);
if (ts.entry_size == 0) {
cut_points_size[i] = 0;
continue;
}
float_type min_val = ts.entries[0].val;
cut_points_local[i][k++] = min_val - (fabsf(min_val) + 1e-5);
if (ts.entry_size > 1 && ts.entry_size <= 16) {
cut_points_local[i][k++] = (ts.entries[0].val + ts.entries[1].val) / 2;
for (int j = 2; j < ts.entry_size; j++) {
float_type mid = (ts.entries[j - 1].val + ts.entries[j].val) / 2;
if (mid > cut_points_local[i][k - 1]) {
cut_points_local[i][k++] = mid;
}
}
} else {
if (ts.entry_size > 1)
cut_points_local[i][k++] = ts.entries[1].val;
for (int j = 2; j < ts.entry_size; j++) {
float_type val = ts.entries[j].val;
if (val > cut_points_local[i][k - 1]) {
cut_points_local[i][k++] = val;
}
}
}
/*
float_type max_val = ts.entries[ts.entry_size - 1].val;
if(max_val > 0){
cut_points_local[i][k++] = max_val*2 + 1e-5;
}
else{
cut_points_local[i][k++] = 1e-5;
}
*/
cut_points_size[i] = k;
}
}
for (int i = 0; i < n_features; i++) {
if (cut_points_size[i] != 0)
this->cut_points.insert(cut_points.end(), cut_points_local[i].begin(),
cut_points_local[i].begin() + cut_points_size[i]);
}
this->row_ptr.push_back(0);
for (int i = 0; i < n_features; i++) {
this->row_ptr.push_back(cut_points_size[i] + this->row_ptr.back());
}
cut_row_ptr.resize(this->row_ptr.size());
cut_row_ptr.copy_from(this->row_ptr.data(), this->row_ptr.size());
cut_points_val.resize(this->cut_points.size());
auto cut_points_val_ptr = cut_points_val.host_data();
auto cut_row_ptr_data = cut_row_ptr.host_data();
for (int i = 0; i < cut_row_ptr.size(); i++) {
int sum = cut_row_ptr_data[i] + cut_row_ptr_data[i + 1] - 1;
for (int j = cut_row_ptr_data[i + 1] - 1; j >= cut_row_ptr_data[i]; j--)
cut_points_val_ptr[j] = this->cut_points[sum - j];
}
LOG(DEBUG) << cut_row_ptr;
LOG(DEBUG) << cut_fid.size();
cut_fid.resize(cut_points.size());
auto cut_fid_data = cut_fid.device_data();
device_loop_2d(n_features, cut_row_ptr.device_data(), [=] __device__(int fid, int i) {
cut_fid_data[i] = fid;
});
}
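// Exact alternative to the sketch-based version: per feature, keep the unique
// column values (thrust::unique_copy only drops adjacent duplicates, so this
// assumes each CSC column is sorted); if there are more than max_num_bins of
// them, take every (unique_len / max_num_bins)-th value as a cut point.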
void HistCut::get_cut_points2(SparseColumns &columns, int max_num_bins, int n_instances) {
int n_column = columns.n_column;
auto csc_val_data = columns.csc_val.host_data();
auto csc_col_ptr_data = columns.csc_col_ptr.host_data();
cut_points.clear();
row_ptr.clear();
row_ptr.resize(1, 0);
//TODO do this on GPU
for (int fid = 0; fid < n_column; ++fid) {
int col_start = csc_col_ptr_data[fid];
int col_len = csc_col_ptr_data[fid + 1] - col_start;
auto val_data = csc_val_data + col_start;
vector<float_type> unique_val(col_len);
int unique_len = thrust::unique_copy(thrust::host, val_data, val_data + col_len, unique_val.data()) - unique_val.data();
if (unique_len <= max_num_bins) {
row_ptr.push_back(unique_len + row_ptr.back());
for (int i = 0; i < unique_len; ++i) {
cut_points.push_back(unique_val[i]);
}
} else {
row_ptr.push_back(max_num_bins + row_ptr.back());
for (int i = 0; i < max_num_bins; ++i) {
cut_points.push_back(unique_val[unique_len / max_num_bins * i]);
}
}
}
cut_points_val.resize(cut_points.size());
cut_points_val.copy_from(cut_points.data(), cut_points.size());
cut_row_ptr.resize(row_ptr.size());
cut_row_ptr.copy_from(row_ptr.data(), row_ptr.size());
cut_fid.resize(cut_points.size());
auto cut_fid_data = cut_fid.device_data();
device_loop_2d(n_column, cut_row_ptr.device_data(), [=] __device__(int fid, int i) {
cut_fid_data[i] = fid;
});
}
|
a5ad2a4497db8c01d56fe08b74f945a57adef0a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Matrix Vector multiplication without a barrier at the end of execution to wait for the tasks to finish before copying the results back.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//Grid dimension
#define B 100
//Block dimension
#define T 256
//Array size
#define C B*T
// Iteration number
#define N 100
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
hipError_t cuErr = call; \
if(hipSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
//Host pointer for matrix b, input vector a and result vector c
int *a;
int *b;
int *c;
//Device pointer for matrix d_b, input vector d_a and result vector d_c
int *d_a;
int *d_b;
int *d_c;
//Initialization and allocation of the host variables
int init(){
//Allocating host variables
a = (int *) malloc(C*sizeof(int));
b = (int *) malloc(C*C*sizeof(int));
//c = (int *) malloc(C*sizeof(int));
cudaErrorCheck(hipHostMalloc(&c, C*sizeof(int)));
//Initialize host values
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
b[j+i*C]=1;
}
a[i]=1;
c[i]=0;
}
return 0;
}
//Kernel
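// Each thread computes one row of the matrix-vector product b*a into d_c[tid];
// the outer loop over N repeats the same work only to lengthen the kernel runtime.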
__global__ void Mult(int* d_a, int* d_b, int* d_c){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for(int i=0; i<N; i++){
d_c[tid] = 0;
for(int j=0; j<C; j++){
d_c[tid]+=d_b[j+tid*C]*d_a[j];
}
}
}
//Checking if the values stored in c are correct
int check(){
bool test = false;
for(int i=C-1; i>=0; i--){
//printf("c[%i]:%i\n",i,c[i]);
if(c[i]!=C){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n");
return 0;
}
//Initialization of the variables on the GPU
int initcuda(){
//Allocation of GPU memory for d_a,d_b,d_c
cudaErrorCheck( hipMalloc(&d_a, C*sizeof(int)));
cudaErrorCheck( hipMalloc(&d_b, C*C*sizeof(int)));
cudaErrorCheck( hipMalloc(&d_c, C*sizeof(int)));
//Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device
cudaErrorCheck( hipMemcpy(d_a,a,C*sizeof(int),hipMemcpyHostToDevice));
cudaErrorCheck( hipMemcpy(d_b,b,C*C*sizeof(int),hipMemcpyHostToDevice));
return 0;
}
//Main program
int main(){
//Calling the initialization methods
init();
initcuda();
//Launch Kernel
//Mult<<<B,T,0,stream1>>>(d_a,d_b,d_c);
hipLaunchKernelGGL(( Mult), dim3(B),dim3(T), 0, 0, d_a,d_b,d_c);
// Check for errors in kernel launch (e.g. invalid execution configuration parameters)
cudaErrorCheck( hipGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( hipDeviceSynchronize());
//Copying back half the result d_c from the device to the host array c
//cudaErrorCheck(hipMemcpyAsync(c,d_c,C*sizeof(int),hipMemcpyDeviceToHost,stream2));
cudaErrorCheck(hipMemcpyAsync(c,d_c,C*sizeof(int),hipMemcpyDeviceToHost));
//Verify result
check();
//Freeing GPU memory
cudaErrorCheck( hipFree(d_a));
cudaErrorCheck( hipFree(d_b));
cudaErrorCheck( hipFree(d_c));
//Freeing CPU memory
free(a);
free(b);
//free(c);
cudaErrorCheck(hipHostFree(c));
return 0;
} | a5ad2a4497db8c01d56fe08b74f945a57adef0a2.cu | /*
Matrix Vector multiplication without a barrier at the end of execution to wait for the tasks to finish before copying the results back.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//Grid dimension
#define B 100
//Block dimension
#define T 256
//Array size
#define C B*T
// Iteration number
#define N 100
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
//Host pointer for matrix b, input vector a and result vector c
int *a;
int *b;
int *c;
//Device pointer for matrix d_b, input vector d_a and result vector d_c
int *d_a;
int *d_b;
int *d_c;
//Initialization and allocation of the host variables
int init(){
//Allocating host variables
a = (int *) malloc(C*sizeof(int));
b = (int *) malloc(C*C*sizeof(int));
//c = (int *) malloc(C*sizeof(int));
cudaErrorCheck(cudaMallocHost(&c, C*sizeof(int)));
//Initialize host values
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
b[j+i*C]=1;
}
a[i]=1;
c[i]=0;
}
return 0;
}
//Kernel
__global__ void Mult(int* d_a, int* d_b, int* d_c){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for(int i=0; i<N; i++){
d_c[tid] = 0;
for(int j=0; j<C; j++){
d_c[tid]+=d_b[j+tid*C]*d_a[j];
}
}
}
//Checking if the values stored in c are correct
int check(){
bool test = false;
for(int i=C-1; i>=0; i--){
//printf("c[%i]:%i\n",i,c[i]);
if(c[i]!=C){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n");
return 0;
}
//Initialization of the variables on the GPU
int initcuda(){
//Allocation of GPU memory for d_a,d_b,d_c
cudaErrorCheck( cudaMalloc(&d_a, C*sizeof(int)));
cudaErrorCheck( cudaMalloc(&d_b, C*C*sizeof(int)));
cudaErrorCheck( cudaMalloc(&d_c, C*sizeof(int)));
//Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device
cudaErrorCheck( cudaMemcpy(d_a,a,C*sizeof(int),cudaMemcpyHostToDevice));
cudaErrorCheck( cudaMemcpy(d_b,b,C*C*sizeof(int),cudaMemcpyHostToDevice));
return 0;
}
//Main program
int main(){
//Calling the initialization methods
init();
initcuda();
//Launch Kernel
//Mult<<<B,T,0,stream1>>>(d_a,d_b,d_c);
Mult<<<B,T>>>(d_a,d_b,d_c);
// Check for errors in kernel launch (e.g. invalid execution configuration parameters)
cudaErrorCheck( cudaGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( cudaDeviceSynchronize());
//Copying back half the result d_c from the device to the host array c
//cudaErrorCheck(cudaMemcpyAsync(c,d_c,C*sizeof(int),cudaMemcpyDeviceToHost,stream2));
cudaErrorCheck(cudaMemcpyAsync(c,d_c,C*sizeof(int),cudaMemcpyDeviceToHost));
//Verify result
check();
//Freeing GPU memory
cudaErrorCheck( cudaFree(d_a));
cudaErrorCheck( cudaFree(d_b));
cudaErrorCheck( cudaFree(d_c));
//Freeing CPU memory
free(a);
free(b);
//free(c);
cudaErrorCheck(cudaFreeHost(c));
return 0;
} |
0469fa412d1c3968e1507905f5d1e64fc2d834ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
void
transfer_3d(double ***dst, double ***src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( hipMemcpy((double *) dst + nPtr,
(double *) src + nPtr,
nBlk * sizeof(double),
(hipMemcpyKind) flag) );
}
void
transfer_3d_to_1d(double *dst, double ***src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( hipMemcpy((double *) dst,
(double *) src + nPtr,
nBlk * sizeof(double),
(hipMemcpyKind) flag) );
}
void
transfer_3d_from_1d(double ***dst, double *src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( hipMemcpy((double *) dst + nPtr,
(double *) src,
nBlk * sizeof(double),
(hipMemcpyKind) flag) );
}
| 0469fa412d1c3968e1507905f5d1e64fc2d834ca.cu | #include <cuda_runtime_api.h>
#include <helper_cuda.h>
void
transfer_3d(double ***dst, double ***src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( cudaMemcpy((double *) dst + nPtr,
(double *) src + nPtr,
nBlk * sizeof(double),
(cudaMemcpyKind) flag) );
}
void
transfer_3d_to_1d(double *dst, double ***src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( cudaMemcpy((double *) dst,
(double *) src + nPtr,
nBlk * sizeof(double),
(cudaMemcpyKind) flag) );
}
void
transfer_3d_from_1d(double ***dst, double *src, int m, int n, int k, int flag)
{
long nPtr = m + m * n;
long nBlk = k * n * m;
// we only transfer the value block
checkCudaErrors( cudaMemcpy((double *) dst + nPtr,
(double *) src,
nBlk * sizeof(double),
(cudaMemcpyKind) flag) );
}
|
c61f5553c2bcf3e00d2c9ed67e041c5ec2fe232f.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %cuda_args
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CHECK: #include "hipblas.h"
#include "rocblas.h"
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
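// Scales n-p entries of row p (from column q onwards) by alpha and ldm-p entries
// of column q (from row p downwards) by beta; the matrix is column-major (IDX2C).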
static __inline__ void modify(float *m, int ldm, int n, int p, int q, float
alpha, float beta) {
// CHECK: hipblasSscal(n - p, alpha, &m[IDX2C(p, q, ldm)], ldm);
// CHECK: hipblasSscal(ldm - p, beta, &m[IDX2C(p, q, ldm)], 1);
hipblasSscal(n - p, alpha, &m[IDX2C(p, q, ldm)], ldm);
hipblasSscal(ldm - p, beta, &m[IDX2C(p, q, ldm)], 1);
}
int main(void) {
int i, j;
// CHECK: hipblasStatus_t stat;
cublasStatus stat;
float* devPtrA;
float* a = 0;
a = (float *)malloc(M * N * sizeof(*a));
if (!a) {
printf("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i, j, M)] = (float)(i * M + j + 1);
}
}
// hipblasInit is not supported yet
hipblasInit();
stat = hipblasAlloc(M*N, sizeof(*a), (void**)&devPtrA);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf("device memory allocation failed");
// hipblasShutdown is not supported yet
hipblasShutdown();
return EXIT_FAILURE;
}
// CHECK: stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf("data download failed");
// hipblasFree is not supported yet
hipblasFree(devPtrA);
// hipblasShutdown is not supported yet
hipblasShutdown();
return EXIT_FAILURE;
}
modify(devPtrA, M, N, 1, 2, 16.0f, 12.0f);
// CHECK: stat = hipblasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
stat = hipblasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf("data upload failed");
// hipblasFree is not supported yet
hipblasFree(devPtrA);
// hipblasShutdown is not supported yet
hipblasShutdown();
return EXIT_FAILURE;
}
// hipblasFree is not supported yet
hipblasFree(devPtrA);
// hipblasShutdown is not supported yet
hipblasShutdown();
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf("%7.0f", a[IDX2C(i, j, M)]);
}
printf("\n");
}
free(a);
return EXIT_SUCCESS;
}
| c61f5553c2bcf3e00d2c9ed67e041c5ec2fe232f.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CHECK: #include "hipblas.h"
#include "cublas.h"
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify(float *m, int ldm, int n, int p, int q, float
alpha, float beta) {
// CHECK: hipblasSscal(n - p, alpha, &m[IDX2C(p, q, ldm)], ldm);
// CHECK: hipblasSscal(ldm - p, beta, &m[IDX2C(p, q, ldm)], 1);
cublasSscal(n - p, alpha, &m[IDX2C(p, q, ldm)], ldm);
cublasSscal(ldm - p, beta, &m[IDX2C(p, q, ldm)], 1);
}
int main(void) {
int i, j;
// CHECK: hipblasStatus_t stat;
cublasStatus stat;
float* devPtrA;
float* a = 0;
a = (float *)malloc(M * N * sizeof(*a));
if (!a) {
printf("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i, j, M)] = (float)(i * M + j + 1);
}
}
// cublasInit is not supported yet
cublasInit();
stat = cublasAlloc(M*N, sizeof(*a), (void**)&devPtrA);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != CUBLAS_STATUS_SUCCESS) {
printf("device memory allocation failed");
// cublasShutdown is not supported yet
cublasShutdown();
return EXIT_FAILURE;
}
// CHECK: stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
stat = cublasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != CUBLAS_STATUS_SUCCESS) {
printf("data download failed");
// cublasFree is not supported yet
cublasFree(devPtrA);
// cublasShutdown is not supported yet
cublasShutdown();
return EXIT_FAILURE;
}
modify(devPtrA, M, N, 1, 2, 16.0f, 12.0f);
// CHECK: stat = hipblasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
stat = cublasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
// CHECK: if (stat != HIPBLAS_STATUS_SUCCESS) {
if (stat != CUBLAS_STATUS_SUCCESS) {
printf("data upload failed");
// cublasFree is not supported yet
cublasFree(devPtrA);
// cublasShutdown is not supported yet
cublasShutdown();
return EXIT_FAILURE;
}
// cublasFree is not supported yet
cublasFree(devPtrA);
// cublasShutdown is not supported yet
cublasShutdown();
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf("%7.0f", a[IDX2C(i, j, M)]);
}
printf("\n");
}
free(a);
return EXIT_SUCCESS;
}
|
50ed237c213015ab8c573b0f815300f074ced2e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
if (k + blockDim.x < n) {
C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x];
}
} | 50ed237c213015ab8c573b0f815300f074ced2e7.cu | #include "includes.h"
__global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
if (k + blockDim.x < n) {
C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x];
}
} |
777a0a84e58b4efccddd254334c82dee06a61fd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parse_oo.h"
void initContext(GraphChiContext *context, int vertices, int edges) {
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
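// Host construction of every vertex: in/out degrees come from consecutive
// entries of the inrow/row pointer arrays, vertex values start at INT_MAX, and
// each incoming edge gets its neighbour id from incol with an initial value of
// INT_MAX; out-edges are wired up later by initOutEdge.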
void initObject(VirtVertex<int, int> *vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol, obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<int> *)alloc->my_new<Edge<int>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<int> **)alloc->my_new<Edge<int> *>(outdegree);
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree, alloc);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
void part0_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
vertex[tid] =
(VirtVertex<int, int> *)alloc->my_new<ChiVertex<int, int>>();
}
}
void part1_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
// } else {
// in_end = context->getNumEdges();
// }
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<myType> *)alloc->my_new<Edge<myType>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<myType> **)alloc->my_new<Edge<myType> *>(outdegree);
// new (&vertex[tid]) ChiVertex<int, int>(tid, indegree,
// outdegree,alloc);
vertex[tid]->set_in_out(alloc);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void part_kern0_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
}
}
__global__ void part_kern1_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
vertex[tid]->setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX);
}
}
}
void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
__global__ void kern_initObject(VirtVertex<int, int> *vertex,
GraphChiContext *context, int *row, int *col,
int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
}
//}
}
__global__ void kern_initOutEdge(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row, int *col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
//} else {
// in_end = context->getNumEdges();
//}
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f);
//}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
__managed__ __align__(16) char buf2[128];
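// Placement-new constructs a device-side instance of myType in the managed
// buffer buf2 and copies its first sizeof(void *) bytes (the vtable pointer)
// over every element of array, so virtual calls made on the GPU resolve through
// a device-built vtable.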
template <class myType>
__global__ void vptrPatch(myType *array, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// printf("-----\n");
myType *obj;
obj = new (buf2) myType();
// void *p;
// p=(void *)0x111111111;
// memcpy(p, obj, sizeof(void *));
// printf("---%p--\n", p);
if (tid < n) {
memcpy(&array[tid], obj, sizeof(void *));
// printf("---%p--\n",p);
}
}
__global__ void vptrPatch_Edge(ChiVertex<int, int> *vertex, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
Edge<int> *obj;
obj = new (buf2) Edge<int>();
if (tid < n)
if (tid == 0)
vertex[tid].vptrPatch(obj, 1);
else
vertex[tid].vptrPatch(obj, 1);
}
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_Bfs;
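// One relaxation step of BFS by label propagation: iteration 0 seeds vertex 0
// with level 0 and writes 1 on its out-edges; in later iterations each vertex
// takes the minimum of its in-edge values and, if that improves its own value,
// adopts it and writes min+1 to any out-edge holding a larger value.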
__global__ void BFS(VirtVertex<int, int> **vertex, GraphChiContext *context,
int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
if (iteration == 0) {
if (tid == 0) {
vertex[tid]->setValue(0);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid]->getValue();
int numInEdge;
numInEdge = vertex[tid]->numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int> *inEdge;
inEdge = vertex[tid]->getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid]->getValue();
if (curmin < vertValue) {
vertex[tid]->setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
}
}
__managed__ void *temp_vfun;
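// Debug/sanity kernel: looks up the vtable of each vertex through the range tree, stashes
// successive vtable slots in the managed temp_vfun (presumably just to keep the lookups
// live), and exercises every virtual method with printf output. Unlike the other kernels
// it does not guard tid against the vertex count, so the launch must not spawn more
// threads than there are vertices.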
__global__ void vfunCheck(VirtVertex<int, int> *vertex) {
void **vtable;
unsigned tree_size = tree_size_g;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
vtable = get_vfunc(&vertex[tid], range_tree, tree_size);
temp_vfun = vtable[1];
vertex[tid].setId(155);
temp_vfun = vtable[0];
printf("%d\n", vertex[tid].getId());
temp_vfun = vtable[3];
vertex[tid].setValue(999);
temp_vfun = vtable[2];
printf("%d\n", vertex[tid].getValue());
temp_vfun = vtable[4];
printf("%d\n", vertex[tid].numInEdges());
temp_vfun = vtable[5];
printf("%d\n", vertex[tid].numOutEdges());
temp_vfun = vtable[6];
printf("%p\n", vertex[tid].getInEdge(0));
temp_vfun = vtable[7];
printf("%p\n", vertex[tid].getOutEdge(0));
}
void BFS_cpu(VirtVertex<int, int> *vertex, GraphChiContext *context) {
int tid = 0;
// printf("ffff\n");
for (tid = 0; tid < context->getNumVertices(); tid++) {
if (context->getNumIterations() == 0) {
if (tid == 0) {
vertex[tid].setValue(0);
int numOutEdge;
numOutEdge = vertex[tid].numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid].getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid].getValue();
int numInEdge;
numInEdge = vertex[tid].numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int> *inEdge;
inEdge = vertex[tid].getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid].getValue();
if (curmin < vertValue) {
vertex[tid].setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid].numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid].getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
// context->setNumIterations(context->getNumIterations() + 1);
}
}
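// Copies every vertex's final BFS distance into the plain int array 'index' so the host
// can read the result back without touching the polymorphic vertex objects. The vtable
// lookup stored into temp_copyBack mirrors the pattern used elsewhere and appears to
// exist only so that the lookup is not optimized away.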
__global__ void copyBack(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *index) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tree_size = tree_size_g;
// ChiVertex<int, int> *obj;
// obj = new (buf2) ChiVertex<int, int>();
// long ***mVtable = (long ***)&vertex[tid];
// long ***mVtable2 = (long ***)obj;
// //memcpy(&vertex[tid],obj,sizeof(void*));
// printf("[%d]-obj %p vert %p\n",tid,*mVtable2,*mVtable);
// *mVtable=*mVtable2;
// printf("[%d]after obj %p vert %p\n",tid,*mVtable2,*mVtable);
if (tid < context->getNumVertices()) {
void **vtable = get_vfunc(vertex[tid], range_tree, tree_size);
temp_copyBack = vtable[2];
// printf("%d\n",index[tid]);
index[tid] = vertex[tid]->getValue();
// if(mVtable[0][0]!=mVtable2[0][0])
// printf("[%d]why !! obj %p vert
// %p\n",tid,mVtable[0][0],mVtable2[0][0]);
// printf("%d\n",index[tid]);
}
}
| 777a0a84e58b4efccddd254334c82dee06a61fd1.cu | #include "parse_oo.h"
void initContext(GraphChiContext *context, int vertices, int edges) {
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
void initObject(VirtVertex<int, int> *vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol, obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<int> *)alloc->my_new<Edge<int>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<int> **)alloc->my_new<Edge<int> *>(outdegree);
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree, alloc);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
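// Split initialization path: part0_initObject only reserves one ChiVertex per vertex
// through the obj_alloc allocator, part1_initObject then lets each vertex allocate its
// in/out edge storage (set_in_out), and the part_kern0/part_kern1 kernels below run the
// placement-new constructors and fill the in-edges on the device. The split appears to be
// dictated by obj_alloc (declared in parse_oo.h): allocation stays on the host while
// construction and edge setup, which establish device-side vtable pointers, run in
// device code.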
void part0_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
vertex[tid] =
(VirtVertex<int, int> *)alloc->my_new<ChiVertex<int, int>>();
}
}
void part1_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
// } else {
// in_end = context->getNumEdges();
// }
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<myType> *)alloc->my_new<Edge<myType>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<myType> **)alloc->my_new<Edge<myType> *>(outdegree);
// new (&vertex[tid]) ChiVertex<int, int>(tid, indegree,
// outdegree,alloc);
vertex[tid]->set_in_out(alloc);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void part_kern0_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
}
}
__global__ void part_kern1_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
vertex[tid]->setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX);
}
}
}
void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
__global__ void kern_initObject(VirtVertex<int, int> *vertex,
GraphChiContext *context, int *row, int *col,
int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
        // for (int i = out_start; i < out_end; i++) {
        //   vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
        // }
    }
}
__global__ void kern_initOutEdge(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row, int *col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
//} else {
// in_end = context->getNumEdges();
//}
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f);
//}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
__managed__ __align__(16) char buf2[128];
template <class myType>
__global__ void vptrPatch(myType *array, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// printf("-----\n");
myType *obj;
obj = new (buf2) myType();
// void *p;
// p=(void *)0x111111111;
// memcpy(p, obj, sizeof(void *));
// printf("---%p--\n", p);
if (tid < n) {
memcpy(&array[tid], obj, sizeof(void *));
// printf("---%p--\n",p);
}
}
__global__ void vptrPatch_Edge(ChiVertex<int, int> *vertex, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
Edge<int> *obj;
obj = new (buf2) Edge<int>();
    if (tid < n)
        vertex[tid].vptrPatch(obj, 1);
}
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_Bfs;
__global__ void BFS(VirtVertex<int, int> **vertex, GraphChiContext *context,
int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
if (iteration == 0) {
if (tid == 0) {
vertex[tid]->setValue(0);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid]->getValue();
int numInEdge;
numInEdge = vertex[tid]->numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int> *inEdge;
inEdge = vertex[tid]->getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid]->getValue();
if (curmin < vertValue) {
vertex[tid]->setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
}
}
__managed__ void *temp_vfun;
__global__ void vfunCheck(VirtVertex<int, int> *vertex) {
void **vtable;
unsigned tree_size = tree_size_g;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
vtable = get_vfunc(&vertex[tid], range_tree, tree_size);
temp_vfun = vtable[1];
vertex[tid].setId(155);
temp_vfun = vtable[0];
printf("%d\n", vertex[tid].getId());
temp_vfun = vtable[3];
vertex[tid].setValue(999);
temp_vfun = vtable[2];
printf("%d\n", vertex[tid].getValue());
temp_vfun = vtable[4];
printf("%d\n", vertex[tid].numInEdges());
temp_vfun = vtable[5];
printf("%d\n", vertex[tid].numOutEdges());
temp_vfun = vtable[6];
printf("%p\n", vertex[tid].getInEdge(0));
temp_vfun = vtable[7];
printf("%p\n", vertex[tid].getOutEdge(0));
}
void BFS_cpu(VirtVertex<int, int> *vertex, GraphChiContext *context) {
int tid = 0;
// printf("ffff\n");
for (tid = 0; tid < context->getNumVertices(); tid++) {
if (context->getNumIterations() == 0) {
if (tid == 0) {
vertex[tid].setValue(0);
int numOutEdge;
numOutEdge = vertex[tid].numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid].getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid].getValue();
int numInEdge;
numInEdge = vertex[tid].numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int> *inEdge;
inEdge = vertex[tid].getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid].getValue();
if (curmin < vertValue) {
vertex[tid].setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid].numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid].getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
// context->setNumIterations(context->getNumIterations() + 1);
}
}
__global__ void copyBack(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *index) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tree_size = tree_size_g;
// ChiVertex<int, int> *obj;
// obj = new (buf2) ChiVertex<int, int>();
// long ***mVtable = (long ***)&vertex[tid];
// long ***mVtable2 = (long ***)obj;
// //memcpy(&vertex[tid],obj,sizeof(void*));
// printf("[%d]-obj %p vert %p\n",tid,*mVtable2,*mVtable);
// *mVtable=*mVtable2;
// printf("[%d]after obj %p vert %p\n",tid,*mVtable2,*mVtable);
if (tid < context->getNumVertices()) {
void **vtable = get_vfunc(vertex[tid], range_tree, tree_size);
temp_copyBack = vtable[2];
// printf("%d\n",index[tid]);
index[tid] = vertex[tid]->getValue();
// if(mVtable[0][0]!=mVtable2[0][0])
// printf("[%d]why !! obj %p vert
// %p\n",tid,mVtable[0][0],mVtable2[0][0]);
// printf("%d\n",index[tid]);
}
}
|
8faacf03c84c9933fee73dd88cd11924a7abeea4.hip | // !!! This is a file automatically generated by hipify!!!
/* (C) Copyright 2018, 2020 Anthony D. Dutoi and Yuhong Liu
*
* This file is part of Qode.
*
* Qode is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Qode is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Qode. If not, see <http://www.gnu.org/licenses/>.
*/
#include "PyC_types.h"
#include<cuda.h>
//#include"wrapper.h"
#include "hip/hip_runtime.h"
#include "stdlib.h"
#include "time.h"
#include "stdio.h"
//#include "wrapper.h"
#include "PyC_types.h"
#include "stdlib.h"
#include <sys/time.h>
#include<omp.h>
#define T 6
#define TR 8
extern "C"
{
/*
// Global variables
double *d_V1112,*d_Rcca1,*d_Ra2,*d_V1222,*d_Rcaa2,*d_Rc1;
double *h_H,*d_h;
double *d_H,*h_Hr,*d_Hr;
*/
// Reduction helpers. warpReduce performs the unrolled final-warp step; volatile keeps the
// shared-memory updates visible between lanes (pre-Volta warp-synchronous idiom).
__device__ void warpReduce(volatile double* sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
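// Block-level partial sums for the energy accumulation: each thread folds two global
// elements (H[i] and H[i + blockDim.x]) into shared memory, the block reduces that array
// with the last warp unrolled, and thread 0 writes one partial sum per block into R for
// the host to finish. Two caveats, given the launch parameters used further down
// (T*T = 36 threads and TR*TR = 64 shared doubles per block): warpReduce also reads
// sdata[36..63], which no thread initializes, and blocks whose first index falls past
// numelements never write their R entry even though the host sums every entry of the
// never-zeroed partial-sum buffer. A power-of-two block size of at least 64 and a
// zero-initialized output buffer would remove both assumptions.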
__global__
void reduction(double *H,double *R, int numelements)
{
extern __shared__ volatile double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
if(i<numelements)
{
sdata[tid] = H[i] + H[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
        for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
            if (tid < s)
            {
                sdata[tid] += sdata[tid + s];
            }
            __syncthreads();
        }
if (tid<32) warpReduce(sdata, tid);
// write result for this block to global mem
if (tid == 0)
{
R[blockIdx.x] = sdata[0];
}
}
}
/*
__global__
void reduction(double *H,double *R, int numelements)
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<numelements)
{
sdata[tid] = H[i];
__syncthreads();
//if (i<numelements)
// {
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if (tid==0)
{
R[blockIdx.x]=sdata[0];
}
}
}
*/
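// Fused kernel for the 1-/1+ dimer term: one thread per (p1, q1, r1) index triple. Each
// thread contracts the V1112*Rcca1*Ra2 and V1222*Rc1*Rcaa2 terms over s2, doubles the
// result, and adds the one-electron h*Rc1*Ra2 contribution only on the r1 == 0 plane so
// that it is counted once per (p1, q1) pair. The per-triple partial sums are written to H
// and collapsed afterwards by the reduction kernel defined above; the shared q1/r1 loop
// bounds are only valid under the stated assumption that n_orb1 == n_orb2.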
__global__
void outerloop(double *V1112,double *Rcca1,double *Ra2,double *H,int n_orb1,int n_orb2,double *V1222,double *Rcaa2, double *Rc1,double *h)
{
int p1=threadIdx.x+blockIdx.x*blockDim.x;
int q1=threadIdx.y+blockIdx.y*blockDim.y;
int r1=threadIdx.z+blockIdx.z*blockDim.z;
double Hlocal=0;
if (p1 <n_orb1 && q1 <n_orb1 && r1<n_orb1)
{ for (int s2=0; s2<n_orb2; s2++)
{
//upperloop
Hlocal += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2];
//middleloop
Hlocal+=V1222[((p1*n_orb2 + q1)*n_orb2 + r1)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q1*n_orb2 + s2)*n_orb2 + r1];
}
//bottomloop
H[(p1*n_orb1+q1)*n_orb1+r1]=2*Hlocal;
H[(p1*n_orb1+q1)*n_orb1+r1]+=(r1==0)?(h[p1*n_orb2 + q1] * Rc1[p1] * Ra2[q1]):0;
}
//reduction still performed externally
}
//This kernel path exploits the assumption that n_orb1 and n_orb2 are the same.
//TODO: Syed performs cudaMallocs just once (optimization 1, save and push file as cutest_s1.cu). Syed performs reduction inside kernel (optimization 2, which builds on 1, cutest_s2.cu).
//TODO: Thor performs CUDA streams on this version and then later includes optimizations 1 and 2 from Syed. Thor also works on his own version for comparison.
//VKP: Skeleton code
//double dimer_1min1pls(int n_orb1, int n_orb2, double* Rc1, double* Rcca1, double* Ra2, double* Rcaa2, double* h, double* V1112, double* V1222, int freevariables)
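// Host driver for the 1-/1+ dimer blocks. Device buffers are sized once from the first
// element's orbital counts (hence the "don't change" assumption noted below), then for
// every matrix element the density and integral tensors are copied to the GPU, outerloop
// produces per-triple partials, the reduction kernel produces per-block partials, and the
// final sum together with the permutational sign is applied on the host. The intermediate
// copy of d_H back into h_H is never read afterwards and looks like leftover debugging.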
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1_in, PyInt* n_orb2_in, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
//printf("hello");
//hipMemcpy(d_h, h[n], sizeof(double)*N_h, hipMemcpyHostToDevice);
//if(DEBUG) printf("h copied\n");
double *d_V1112,*d_Rcca1,*d_Ra2,*d_V1222,*d_Rcaa2,*d_Rc1,*d_H,*h_H,*d_h,*h_Hr,*d_Hr;
// All of these assume that the values of n_orb1 and n_orb2 don't change
const int n_orb1 = n_orb1_in[0];
const int n_orb2 = n_orb2_in[0];
const int blocks = (n_orb1*n_orb1*n_orb1)/(TR*TR);
const int nbpgrid= n_orb1/T;
const int N_V1112 = n_orb1*n_orb1*n_orb1*n_orb2;
const int N_Rcca1 = n_orb1*n_orb1*n_orb1;
const int N_Ra2 = n_orb2;
const int N_V1222 = n_orb1*n_orb2*n_orb2*n_orb2;
const int N_Rcaa2 = n_orb2*n_orb2*n_orb2;
const int N_Rc1 = n_orb1;
const int N_H = n_orb1*n_orb1*n_orb1; // this assumes n_orb1 = n_orb2
const int N_h = n_orb1*n_orb2;
const int N_Hr = blocks;
const dim3 dimblock(T,T,T);
const dim3 dimgrid(nbpgrid,nbpgrid,nbpgrid);
const dim3 dimblockR(T*T);
const dim3 dimgridR(blocks);
hipMalloc((void **) &d_V1112, sizeof(double)*N_V1112);
hipMalloc((void **) &d_Ra2, sizeof(double)*N_Ra2);
hipMalloc((void **) &d_V1222, sizeof(double)*N_V1222);
hipMalloc((void **) &d_Rcca1, sizeof(double)*N_Rcca1);
hipMalloc((void **) &d_Rcaa2, sizeof(double)*N_Rcaa2);
hipMalloc((void **) &d_Rc1, sizeof(double)*N_Rc1);
hipMalloc((void **) &d_H, sizeof(double)*N_H);
hipMalloc((void **) &d_h, sizeof(double)*N_h);
hipMalloc((void **) &d_Hr, sizeof(double)*N_Hr);
h_Hr=(double *)malloc(sizeof(double)*N_Hr);
h_H=(double *)malloc(sizeof(double)*N_H);
#pragma unroll
/*
hipMemcpy(d_Rcca1, Rcca1, sizeof(double)*N_Rcca1, hipMemcpyHostToDevice);
hipMemcpy(d_Rcaa2, Rcaa2, sizeof(double)*N_Rcaa2, hipMemcpyHostToDevice);
hipMemcpy(d_Rc1, Rc1, sizeof(double)*N_Rc1, hipMemcpyHostToDevice);
hipMemcpy(d_Ra2, Ra2, sizeof(double)*N_Ra2, hipMemcpyHostToDevice);
hipMemcpy(d_h, h, sizeof(double)*N_h, hipMemcpyHostToDevice);
hipMemcpy(d_V1112, V1, sizeof(double)*N_V1112, hipMemcpyHostToDevice);
hipMemcpy(d_V1222, V2, sizeof(double)*N_V1222, hipMemcpyHostToDevice);
*/
for(int n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
double tmp = 0.0;
hipMemcpy(d_Rcca1, Rcca1[n], sizeof(double)*N_Rcca1, hipMemcpyHostToDevice);
hipMemcpy(d_Rcaa2, Rcaa2[n], sizeof(double)*N_Rcaa2, hipMemcpyHostToDevice);
hipMemcpy(d_Rc1, Rc1[n], sizeof(double)*N_Rc1, hipMemcpyHostToDevice);
hipMemcpy(d_Ra2, Ra2[n], sizeof(double)*N_Ra2, hipMemcpyHostToDevice);
hipMemcpy(d_h, h[n], sizeof(double)*N_h, hipMemcpyHostToDevice);
hipMemcpy(d_V1112, V1[n], sizeof(double)*N_V1112, hipMemcpyHostToDevice);
hipMemcpy(d_V1222, V2[n], sizeof(double)*N_V1222, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( outerloop), dim3(dimgrid),dim3(dimblock), 0, 0, d_V1112,d_Rcca1,d_Ra2,d_H,n_orb1,n_orb2,d_V1222,d_Rcaa2,d_Rc1,d_h);
hipMemcpy(h_H, d_H, sizeof(double)*N_H, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
        //if(DEBUG) printf("Launching reduction\n");
hipLaunchKernelGGL(( reduction), dim3(dimgridR),dim3(dimblockR),sizeof(double)*TR*TR, 0, d_H,d_Hr,(n_orb1*n_orb1*n_orb1));
//gpuErr(hipPeekAtLastError());
hipMemcpy(h_Hr, d_Hr, sizeof(double)*N_Hr, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int k=0; k<blocks; k++) {
tmp += h_Hr[k];
}
H[n][index] = tmp*sign[n];
}
hipFree(d_V1112);
hipFree(d_Rcca1);
hipFree(d_Ra2);
hipFree(d_V1222);
hipFree(d_Rcaa2);
hipFree(d_Rc1);
hipFree(d_H);
hipFree(d_h);
hipFree(d_Hr);
free(h_Hr);
free(h_H);
//printf("hello ");
}
Double monomer(PyInt n_orb, Double* Rca, Double* Rccaa, Double* h, Double* V)
{
Double H = 0;
//printf("%d N orbibtal: ",n_orb);
PyInt p=0;
for (p=0; p<n_orb; p++)
{
PyInt q=0;
for (q=0; q<n_orb; q++)
{
PyInt r=0;
for ( r=0; r<n_orb; r++)
{
PyInt s=0;
for (s=0; s<n_orb; s++)
{
H += V[((p*n_orb + q)*n_orb + r)*n_orb + s] * Rccaa[((p*n_orb + q)*n_orb + s)*n_orb + r];
}
}
}
}
PyInt p1=0;
for (p1=0; p1<n_orb; p1++)
{
PyInt q1=0;
for (q1=0; q1<n_orb; q1++)
{
H += h[p1*n_orb + q1] * Rca[p1*n_orb + q1];
}
}
return H;
}
Double dimer_2min2pls(PyInt n_orb1, PyInt n_orb2, Double* Rcc1, Double* Raa2, Double* V)
{
Double H = 0;
PyInt p1=0;
for (p1=0; p1<n_orb1; p1++)
{
PyInt q1=0;
for (q1=0; q1<n_orb1; q1++)
{
PyInt r2=0;
for (r2=0; r2<n_orb2; r2++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V[((p1*n_orb1 + q1)*n_orb2 + r2)*n_orb2 + s2] * Rcc1[p1*n_orb1 + q1] * Raa2[s2*n_orb2 + r2];
}
}
}
}
return H;
}
/*
static int count=0;
Double dimer_1min1pls(PyInt n_orb1, PyInt n_orb2, Double* Rc1, Double* Rcca1, Double* Ra2, Double* Rcaa2, Double* h, Double* V1112, Double* V1222, int freevariables)
{
test_wrapper(n_orb1, n_orb2, Rc1, Rcca1, Ra2, Rcaa2, h, V1112, V1222,freevariables);
count++;
//if(count>10)
//exit(0);
}*/
/*
Double dimer_1min1pls(PyInt n_orb1, PyInt n_orb2, Double* Rc1, Double* Rcca1, Double* Ra2, Double* Rcaa2, Double* h, Double* V1112, Double* V1222)
{
Double H = 0;
//printf("%d ord1: ",n_orb1);
//printf(" n_orb2:%d\n ",n_orb2);
//printf(" Rc1:%f \n",*Rc1);
//printf(" Rcca1:%f\n ",*Rcca1);
//printf(" Ra2: %f\n ",*Ra2);
//printf(" h:%f",*h);
//printf(" V1112:%f",* V1112);
PyInt p1=0;
for ( p1=0; p1<n_orb1; p1++)
{
PyInt q1=0;
for ( q1=0; q1<n_orb1; q1++)
{
PyInt r1=0;
for ( r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2];
}
}
}
}
PyInt p11=0;
for (p11=0; p11<n_orb1; p11++)
{
PyInt q2=0;
for (q2=0; q2<n_orb2; q2++)
{
PyInt r2=0;
for (r2=0; r2<n_orb2; r2++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V1222[((p11*n_orb2 + q2)*n_orb2 + r2)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q2*n_orb2 + s2)*n_orb2 + r2];
}
}
}
}
H *= 2;
PyInt p12=0;
for (p12=0; p12<n_orb1; p12++)
{
PyInt q22=0;
for (q22=0; q22<n_orb2; q22++)
{
H += h[p12*n_orb2 + q22] * Rc1[p12] * Ra2[q22];
}
}
//printf("%f H: ",H);
return H;
}
*/
Double dimer_00(PyInt n_orb1, PyInt n_orb2, Double* Rca1, Double* Rca2, Double* V)
{
Double H = 0;
PyInt p1=0;
for ( p1=0; p1<n_orb1; p1++)
{
PyInt q2=0;
for ( q2=0; q2<n_orb2; q2++)
{
PyInt r1=0;
for ( r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V[((p1*n_orb2 + q2)*n_orb1 + r1)*n_orb2 + s2] * Rca1[p1*n_orb1 + r1] * Rca2[q2*n_orb2 + s2];
}
}
}
}
return 4*H;
}
Double dimer_1pls1min(PyInt n_orb1, PyInt n_orb2, Double* Ra1, Double* Rcaa1, Double* Rc2, Double* Rcca2, Double* h, Double* V2221, Double* V2111)
{
Double H = 0;
PyInt p2=0;
for (p2=0; p2<n_orb2; p2++)
{
PyInt q2=0;
for ( q2=0; q2<n_orb2; q2++)
{
PyInt r2=0;
for ( r2=0; r2<n_orb2; r2++)
{
PyInt s1=0;
for ( s1=0; s1<n_orb1; s1++)
{
H += V2221[((p2*n_orb2 + q2)*n_orb2 + r2)*n_orb1 + s1] * Rcca2[(q2*n_orb2 + p2)*n_orb2 + r2] * Ra1[s1];
}
}
}
}
PyInt p22=0;
for (p22=0; p22<n_orb2; p22++)
{
PyInt q1=0;
for ( q1=0; q1<n_orb1; q1++)
{
PyInt r1=0;
for (r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for (s2=0; s2<n_orb1; s2++)
{
H += V2111[((p22*n_orb1 + q1)*n_orb1 + r1)*n_orb1 + s2] * Rc2[p22] * Rcaa1[(q1*n_orb1 + s2)*n_orb1 + r1];
}
}
}
}
H *= 2;
PyInt p3=0;
for (p3=0; p3<n_orb2; p3++)
{
PyInt q3=0;
for (q3=0; q3<n_orb1; q3++)
{
H += h[p3*n_orb1 + q3] * Rc2[p3] * Ra1[q3];
}
}
return H;
}
Double dimer_2pls2min(PyInt n_orb1, PyInt n_orb2, Double* Raa1, Double* Rcc2, Double* V)
{
Double H = 0;
PyInt p2=0;
for (p2=0; p2<n_orb2; p2++)
{
PyInt q2=0;
for (q2=0; q2<n_orb2; q2++)
{
PyInt r1=0;
for (r1=0; r1<n_orb1; r1++)
{
PyInt s1=0;
for (s1=0; s1<n_orb1; s1++)
{
H += V[((p2*n_orb2 + q2)*n_orb1 + r1)*n_orb1 + s1] * Rcc2[p2*n_orb2 + q2] * Raa1[s1*n_orb1 + r1];
}
}
}
}
return H;
}
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
int n;
for(n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
double tmp = 0.0;
int p1,r1,q1,s2;
for(p1=0; p1<n_orb1[n]; p1++) {
for(r1=0; r1<n_orb1[n]; r1++) {
for(q1=0; q1<n_orb1[n]; q1++) {
for( s2=0; s2<n_orb2[n]; s2++) {
tmp += V1[n][((p1*n_orb1[n] + q1)*n_orb1[n] + r1)*n_orb2[n] + s2] * Rcca1[n][(q1*n_orb1[n] + p1)*n_orb1[n] + r1] * Ra2[n][s2];
}
}
}
}
int p11,q2,r2;
for(p11=0; p11<n_orb1[n]; p11++) {
for(q2=0; q2<n_orb2[n]; q2++) {
for(r2=0; r2<n_orb2[n]; r2++) {
for(s2=0; s2<n_orb2[n]; s2++) {
tmp += V2[n][((p11*n_orb2[n] + q2)*n_orb2[n] + r2)*n_orb2[n] + s2] * Rc1[n][p11] * Rcaa2[n][(q2*n_orb2[n] + s2)*n_orb2[n] + r2];
}
}
}
}
tmp *= 2;
int p12,q22;
for(p12=0; p12<n_orb1[n]; p12++) {
for(q22=0; q22<n_orb2[n]; q22++) {
tmp += h[n][p12*n_orb2[n]+q22] * Rc1[n][p12] * Ra2[n][q22];
}
}
H[n][index] = tmp*sign[n];
}
}
*/
void monomer_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb, Double** Rca, Double** Rccaa, Double** h, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = monomer(n_orb[n], Rca[n], Rccaa[n], h[n], V[n]);
}
return;
}
void dimer_2min2pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Rcc1, Double** Raa2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_2min2pls(n_orb1[n], n_orb2[n], Rcc1[n], Raa2[n], V[n]);
}
return;
}
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
int freevariables=0;
PyInt n=0;
#pragma unroll
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = sign[n] * dimer_1min1pls(n_orb1[n], n_orb2[n], Rc1[n], Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n],freevariables);
}
freevariables=1;
dimer_1min1pls(n_orb1[n], n_orb2[n], Rc1[n], Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n],freevariables);
return;
}
*/
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
test_wrapper( n_elem,H,i, j,dim,sign,n_orb1,n_orb2,Rc1,Rcca1,Ra2,Rcaa2, h,V1,V2);
//return
}
*/
void dimer_00_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Rca1, Double** Rca2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_00(n_orb1[n], n_orb2[n], Rca1[n], Rca2[n], V[n]);
}
return;
}
void dimer_1pls1min_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Ra1, Double** Rcaa1, Double** Rc2, Double** Rcca2, Double** h, Double** V1, Double** V2)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = sign[n] * dimer_1pls1min(n_orb1[n], n_orb2[n], Ra1[n], Rcaa1[n], Rc2[n], Rcca2[n], h[n], V1[n], V2[n]);
}
return;
}
void dimer_2pls2min_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Raa1, Double** Rcc2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_2pls2min(n_orb1[n], n_orb2[n], Raa1[n], Rcc2[n], V[n]);
}
return;
}
}
| 8faacf03c84c9933fee73dd88cd11924a7abeea4.cu | /* (C) Copyright 2018, 2020 Anthony D. Dutoi and Yuhong Liu
*
* This file is part of Qode.
*
* Qode is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Qode is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Qode. If not, see <http://www.gnu.org/licenses/>.
*/
#include "PyC_types.h"
#include<cuda.h>
//#include"wrapper.h"
#include "cuda_runtime.h"
#include "stdlib.h"
#include "time.h"
#include "stdio.h"
//#include "wrapper.h"
#include "PyC_types.h"
#include "stdlib.h"
#include <sys/time.h>
#include<omp.h>
#define T 6
#define TR 8
extern "C"
{
/*
// Global variables
double *d_V1112,*d_Rcca1,*d_Ra2,*d_V1222,*d_Rcaa2,*d_Rc1;
double *h_H,*d_h;
double *d_H,*h_Hr,*d_Hr;
*/
// Reduction helpers. warpReduce performs the unrolled final-warp step; volatile keeps the
// shared-memory updates visible between lanes (pre-Volta warp-synchronous idiom).
__device__ void warpReduce(volatile double* sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
__global__
void reduction(double *H,double *R, int numelements)
{
extern __shared__ volatile double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
if(i<numelements)
{
sdata[tid] = H[i] + H[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
        for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
            if (tid < s)
            {
                sdata[tid] += sdata[tid + s];
            }
            __syncthreads();
        }
if (tid<32) warpReduce(sdata, tid);
// write result for this block to global mem
if (tid == 0)
{
R[blockIdx.x] = sdata[0];
}
}
}
/*
__global__
void reduction(double *H,double *R, int numelements)
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<numelements)
{
sdata[tid] = H[i];
__syncthreads();
//if (i<numelements)
// {
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if (tid==0)
{
R[blockIdx.x]=sdata[0];
}
}
}
*/
__global__
void outerloop(double *V1112,double *Rcca1,double *Ra2,double *H,int n_orb1,int n_orb2,double *V1222,double *Rcaa2, double *Rc1,double *h)
{
int p1=threadIdx.x+blockIdx.x*blockDim.x;
int q1=threadIdx.y+blockIdx.y*blockDim.y;
int r1=threadIdx.z+blockIdx.z*blockDim.z;
double Hlocal=0;
if (p1 <n_orb1 && q1 <n_orb1 && r1<n_orb1)
{ for (int s2=0; s2<n_orb2; s2++)
{
//upperloop
Hlocal += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2];
//middleloop
Hlocal+=V1222[((p1*n_orb2 + q1)*n_orb2 + r1)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q1*n_orb2 + s2)*n_orb2 + r1];
}
//bottomloop
H[(p1*n_orb1+q1)*n_orb1+r1]=2*Hlocal;
H[(p1*n_orb1+q1)*n_orb1+r1]+=(r1==0)?(h[p1*n_orb2 + q1] * Rc1[p1] * Ra2[q1]):0;
}
//reduction still performed externally
}
//This kernel path exploits the assumption that n_orb1 and n_orb2 are the same.
//TODO: Syed performs cudaMallocs just once (optimization 1, save and push file as cutest_s1.cu). Syed performs reduction inside kernel (optimization 2, which builds on 1, cutest_s2.cu).
//TODO: Thor performs CUDA streams on this version and then later includes optimizations 1 and 2 from Syed. Thor also works on his own version for comparison.
//VKP: Skeleton code
//double dimer_1min1pls(int n_orb1, int n_orb2, double* Rc1, double* Rcca1, double* Ra2, double* Rcaa2, double* h, double* V1112, double* V1222, int freevariables)
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1_in, PyInt* n_orb2_in, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
//printf("hello");
//cudaMemcpy(d_h, h[n], sizeof(double)*N_h, cudaMemcpyHostToDevice);
//if(DEBUG) printf("h copied\n");
double *d_V1112,*d_Rcca1,*d_Ra2,*d_V1222,*d_Rcaa2,*d_Rc1,*d_H,*h_H,*d_h,*h_Hr,*d_Hr;
// All of these assume that the values of n_orb1 and n_orb2 don't change
const int n_orb1 = n_orb1_in[0];
const int n_orb2 = n_orb2_in[0];
const int blocks = (n_orb1*n_orb1*n_orb1)/(TR*TR);
const int nbpgrid= n_orb1/T;
const int N_V1112 = n_orb1*n_orb1*n_orb1*n_orb2;
const int N_Rcca1 = n_orb1*n_orb1*n_orb1;
const int N_Ra2 = n_orb2;
const int N_V1222 = n_orb1*n_orb2*n_orb2*n_orb2;
const int N_Rcaa2 = n_orb2*n_orb2*n_orb2;
const int N_Rc1 = n_orb1;
const int N_H = n_orb1*n_orb1*n_orb1; // this assumes n_orb1 = n_orb2
const int N_h = n_orb1*n_orb2;
const int N_Hr = blocks;
const dim3 dimblock(T,T,T);
const dim3 dimgrid(nbpgrid,nbpgrid,nbpgrid);
const dim3 dimblockR(T*T);
const dim3 dimgridR(blocks);
cudaMalloc((void **) &d_V1112, sizeof(double)*N_V1112);
cudaMalloc((void **) &d_Ra2, sizeof(double)*N_Ra2);
cudaMalloc((void **) &d_V1222, sizeof(double)*N_V1222);
cudaMalloc((void **) &d_Rcca1, sizeof(double)*N_Rcca1);
cudaMalloc((void **) &d_Rcaa2, sizeof(double)*N_Rcaa2);
cudaMalloc((void **) &d_Rc1, sizeof(double)*N_Rc1);
cudaMalloc((void **) &d_H, sizeof(double)*N_H);
cudaMalloc((void **) &d_h, sizeof(double)*N_h);
cudaMalloc((void **) &d_Hr, sizeof(double)*N_Hr);
h_Hr=(double *)malloc(sizeof(double)*N_Hr);
h_H=(double *)malloc(sizeof(double)*N_H);
#pragma unroll
/*
cudaMemcpy(d_Rcca1, Rcca1, sizeof(double)*N_Rcca1, cudaMemcpyHostToDevice);
cudaMemcpy(d_Rcaa2, Rcaa2, sizeof(double)*N_Rcaa2, cudaMemcpyHostToDevice);
cudaMemcpy(d_Rc1, Rc1, sizeof(double)*N_Rc1, cudaMemcpyHostToDevice);
cudaMemcpy(d_Ra2, Ra2, sizeof(double)*N_Ra2, cudaMemcpyHostToDevice);
cudaMemcpy(d_h, h, sizeof(double)*N_h, cudaMemcpyHostToDevice);
cudaMemcpy(d_V1112, V1, sizeof(double)*N_V1112, cudaMemcpyHostToDevice);
cudaMemcpy(d_V1222, V2, sizeof(double)*N_V1222, cudaMemcpyHostToDevice);
*/
for(int n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
double tmp = 0.0;
cudaMemcpy(d_Rcca1, Rcca1[n], sizeof(double)*N_Rcca1, cudaMemcpyHostToDevice);
cudaMemcpy(d_Rcaa2, Rcaa2[n], sizeof(double)*N_Rcaa2, cudaMemcpyHostToDevice);
cudaMemcpy(d_Rc1, Rc1[n], sizeof(double)*N_Rc1, cudaMemcpyHostToDevice);
cudaMemcpy(d_Ra2, Ra2[n], sizeof(double)*N_Ra2, cudaMemcpyHostToDevice);
cudaMemcpy(d_h, h[n], sizeof(double)*N_h, cudaMemcpyHostToDevice);
cudaMemcpy(d_V1112, V1[n], sizeof(double)*N_V1112, cudaMemcpyHostToDevice);
cudaMemcpy(d_V1222, V2[n], sizeof(double)*N_V1222, cudaMemcpyHostToDevice);
outerloop<<<dimgrid,dimblock>>>(d_V1112,d_Rcca1,d_Ra2,d_H,n_orb1,n_orb2,d_V1222,d_Rcaa2,d_Rc1,d_h);
cudaMemcpy(h_H, d_H, sizeof(double)*N_H, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
        //if(DEBUG) printf("Launching reduction\n");
reduction<<<dimgridR,dimblockR,sizeof(double)*TR*TR>>>(d_H,d_Hr,(n_orb1*n_orb1*n_orb1));
//gpuErr(cudaPeekAtLastError());
cudaMemcpy(h_Hr, d_Hr, sizeof(double)*N_Hr, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int k=0; k<blocks; k++) {
tmp += h_Hr[k];
}
H[n][index] = tmp*sign[n];
}
cudaFree(d_V1112);
cudaFree(d_Rcca1);
cudaFree(d_Ra2);
cudaFree(d_V1222);
cudaFree(d_Rcaa2);
cudaFree(d_Rc1);
cudaFree(d_H);
cudaFree(d_h);
cudaFree(d_Hr);
free(h_Hr);
free(h_H);
//printf("hello ");
}
Double monomer(PyInt n_orb, Double* Rca, Double* Rccaa, Double* h, Double* V)
{
Double H = 0;
//printf("%d N orbibtal: ",n_orb);
PyInt p=0;
for (p=0; p<n_orb; p++)
{
PyInt q=0;
for (q=0; q<n_orb; q++)
{
PyInt r=0;
for ( r=0; r<n_orb; r++)
{
PyInt s=0;
for (s=0; s<n_orb; s++)
{
H += V[((p*n_orb + q)*n_orb + r)*n_orb + s] * Rccaa[((p*n_orb + q)*n_orb + s)*n_orb + r];
}
}
}
}
PyInt p1=0;
for (p1=0; p1<n_orb; p1++)
{
PyInt q1=0;
for (q1=0; q1<n_orb; q1++)
{
H += h[p1*n_orb + q1] * Rca[p1*n_orb + q1];
}
}
return H;
}
Double dimer_2min2pls(PyInt n_orb1, PyInt n_orb2, Double* Rcc1, Double* Raa2, Double* V)
{
Double H = 0;
PyInt p1=0;
for (p1=0; p1<n_orb1; p1++)
{
PyInt q1=0;
for (q1=0; q1<n_orb1; q1++)
{
PyInt r2=0;
for (r2=0; r2<n_orb2; r2++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V[((p1*n_orb1 + q1)*n_orb2 + r2)*n_orb2 + s2] * Rcc1[p1*n_orb1 + q1] * Raa2[s2*n_orb2 + r2];
}
}
}
}
return H;
}
/*
static int count=0;
Double dimer_1min1pls(PyInt n_orb1, PyInt n_orb2, Double* Rc1, Double* Rcca1, Double* Ra2, Double* Rcaa2, Double* h, Double* V1112, Double* V1222, int freevariables)
{
test_wrapper(n_orb1, n_orb2, Rc1, Rcca1, Ra2, Rcaa2, h, V1112, V1222,freevariables);
count++;
//if(count>10)
//exit(0);
}*/
/*
Double dimer_1min1pls(PyInt n_orb1, PyInt n_orb2, Double* Rc1, Double* Rcca1, Double* Ra2, Double* Rcaa2, Double* h, Double* V1112, Double* V1222)
{
Double H = 0;
//printf("%d ord1: ",n_orb1);
//printf(" n_orb2:%d\n ",n_orb2);
//printf(" Rc1:%f \n",*Rc1);
//printf(" Rcca1:%f\n ",*Rcca1);
//printf(" Ra2: %f\n ",*Ra2);
//printf(" h:%f",*h);
//printf(" V1112:%f",* V1112);
PyInt p1=0;
for ( p1=0; p1<n_orb1; p1++)
{
PyInt q1=0;
for ( q1=0; q1<n_orb1; q1++)
{
PyInt r1=0;
for ( r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2];
}
}
}
}
PyInt p11=0;
for (p11=0; p11<n_orb1; p11++)
{
PyInt q2=0;
for (q2=0; q2<n_orb2; q2++)
{
PyInt r2=0;
for (r2=0; r2<n_orb2; r2++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V1222[((p11*n_orb2 + q2)*n_orb2 + r2)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q2*n_orb2 + s2)*n_orb2 + r2];
}
}
}
}
H *= 2;
PyInt p12=0;
for (p12=0; p12<n_orb1; p12++)
{
PyInt q22=0;
for (q22=0; q22<n_orb2; q22++)
{
H += h[p12*n_orb2 + q22] * Rc1[p12] * Ra2[q22];
}
}
//printf("%f H: ",H);
return H;
}
*/
Double dimer_00(PyInt n_orb1, PyInt n_orb2, Double* Rca1, Double* Rca2, Double* V)
{
Double H = 0;
PyInt p1=0;
for ( p1=0; p1<n_orb1; p1++)
{
PyInt q2=0;
for ( q2=0; q2<n_orb2; q2++)
{
PyInt r1=0;
for ( r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for ( s2=0; s2<n_orb2; s2++)
{
H += V[((p1*n_orb2 + q2)*n_orb1 + r1)*n_orb2 + s2] * Rca1[p1*n_orb1 + r1] * Rca2[q2*n_orb2 + s2];
}
}
}
}
return 4*H;
}
Double dimer_1pls1min(PyInt n_orb1, PyInt n_orb2, Double* Ra1, Double* Rcaa1, Double* Rc2, Double* Rcca2, Double* h, Double* V2221, Double* V2111)
{
Double H = 0;
PyInt p2=0;
for (p2=0; p2<n_orb2; p2++)
{
PyInt q2=0;
for ( q2=0; q2<n_orb2; q2++)
{
PyInt r2=0;
for ( r2=0; r2<n_orb2; r2++)
{
PyInt s1=0;
for ( s1=0; s1<n_orb1; s1++)
{
H += V2221[((p2*n_orb2 + q2)*n_orb2 + r2)*n_orb1 + s1] * Rcca2[(q2*n_orb2 + p2)*n_orb2 + r2] * Ra1[s1];
}
}
}
}
PyInt p22=0;
for (p22=0; p22<n_orb2; p22++)
{
PyInt q1=0;
for ( q1=0; q1<n_orb1; q1++)
{
PyInt r1=0;
for (r1=0; r1<n_orb1; r1++)
{
PyInt s2=0;
for (s2=0; s2<n_orb1; s2++)
{
H += V2111[((p22*n_orb1 + q1)*n_orb1 + r1)*n_orb1 + s2] * Rc2[p22] * Rcaa1[(q1*n_orb1 + s2)*n_orb1 + r1];
}
}
}
}
H *= 2;
PyInt p3=0;
for (p3=0; p3<n_orb2; p3++)
{
PyInt q3=0;
for (q3=0; q3<n_orb1; q3++)
{
H += h[p3*n_orb1 + q3] * Rc2[p3] * Ra1[q3];
}
}
return H;
}
Double dimer_2pls2min(PyInt n_orb1, PyInt n_orb2, Double* Raa1, Double* Rcc2, Double* V)
{
Double H = 0;
PyInt p2=0;
for (p2=0; p2<n_orb2; p2++)
{
PyInt q2=0;
for (q2=0; q2<n_orb2; q2++)
{
PyInt r1=0;
for (r1=0; r1<n_orb1; r1++)
{
PyInt s1=0;
for (s1=0; s1<n_orb1; s1++)
{
H += V[((p2*n_orb2 + q2)*n_orb1 + r1)*n_orb1 + s1] * Rcc2[p2*n_orb2 + q2] * Raa1[s1*n_orb1 + r1];
}
}
}
}
return H;
}
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
int n;
for(n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
double tmp = 0.0;
int p1,r1,q1,s2;
for(p1=0; p1<n_orb1[n]; p1++) {
for(r1=0; r1<n_orb1[n]; r1++) {
for(q1=0; q1<n_orb1[n]; q1++) {
for( s2=0; s2<n_orb2[n]; s2++) {
tmp += V1[n][((p1*n_orb1[n] + q1)*n_orb1[n] + r1)*n_orb2[n] + s2] * Rcca1[n][(q1*n_orb1[n] + p1)*n_orb1[n] + r1] * Ra2[n][s2];
}
}
}
}
int p11,q2,r2;
for(p11=0; p11<n_orb1[n]; p11++) {
for(q2=0; q2<n_orb2[n]; q2++) {
for(r2=0; r2<n_orb2[n]; r2++) {
for(s2=0; s2<n_orb2[n]; s2++) {
tmp += V2[n][((p11*n_orb2[n] + q2)*n_orb2[n] + r2)*n_orb2[n] + s2] * Rc1[n][p11] * Rcaa2[n][(q2*n_orb2[n] + s2)*n_orb2[n] + r2];
}
}
}
}
tmp *= 2;
int p12,q22;
for(p12=0; p12<n_orb1[n]; p12++) {
for(q22=0; q22<n_orb2[n]; q22++) {
tmp += h[n][p12*n_orb2[n]+q22] * Rc1[n][p12] * Ra2[n][q22];
}
}
H[n][index] = tmp*sign[n];
}
}
*/
void monomer_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb, Double** Rca, Double** Rccaa, Double** h, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = monomer(n_orb[n], Rca[n], Rccaa[n], h[n], V[n]);
}
return;
}
void dimer_2min2pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Rcc1, Double** Raa2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_2min2pls(n_orb1[n], n_orb2[n], Rcc1[n], Raa2[n], V[n]);
}
return;
}
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
int freevariables=0;
PyInt n=0;
#pragma unroll
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = sign[n] * dimer_1min1pls(n_orb1[n], n_orb2[n], Rc1[n], Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n],freevariables);
}
freevariables=1;
dimer_1min1pls(n_orb1[n], n_orb2[n], Rc1[n], Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n],freevariables);
return;
}
*/
/*
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
test_wrapper( n_elem,H,i, j,dim,sign,n_orb1,n_orb2,Rc1,Rcca1,Ra2,Rcaa2, h,V1,V2);
//return
}
*/
void dimer_00_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Rca1, Double** Rca2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_00(n_orb1[n], n_orb2[n], Rca1[n], Rca2[n], V[n]);
}
return;
}
void dimer_1pls1min_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Ra1, Double** Rcaa1, Double** Rc2, Double** Rcca2, Double** h, Double** V1, Double** V2)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = sign[n] * dimer_1pls1min(n_orb1[n], n_orb2[n], Ra1[n], Rcaa1[n], Rc2[n], Rcca2[n], h[n], V1[n], V2[n]);
}
return;
}
void dimer_2pls2min_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyInt* n_orb1, PyInt* n_orb2, Double** Raa1, Double** Rcc2, Double** V)
{
PyInt n=0;
for ( n=0; n<n_elem; n++)
{
PyInt index = i[n]*dim[n] + j[n];
H[n][index] = dimer_2pls2min(n_orb1[n], n_orb2[n], Raa1[n], Rcc2[n], V[n]);
}
return;
}
}
|
094b0dd474e9f1808b6df94d051c2948e4aab6bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm
#include <assert.h>
#include <cutil_inline.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
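// Expects one thread per pair of elements: blockDim.x == SHARED_SIZE_LIMIT / 2, with each
// block sorting one SHARED_SIZE_LIMIT-sized chunk (possibly holding several short arrays)
// entirely in shared memory. arrayLength must be a power of two no larger than
// SHARED_SIZE_LIMIT; the host wrapper below sets the launch configuration accordingly and
// asserts the power-of-two and divisibility requirements.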
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
){
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for(uint size = 2; size < arrayLength; size <<= 1){
//Bitonic merge
uint ddd = dir ^ ( (threadIdx.x & (size / 2)) != 0 );
for(uint stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for(uint stride = arrayLength / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
//Bottom-level bitonic sort
//Almost the same as bitonicSortShared with the exception of
//even / odd subarrays being sorted in opposite directions
//Bitonic merge accepts both
//Ascending | descending or descending | ascending sorted pairs
__global__ void bitonicSortShared1(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal
){
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subarray and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for(uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1){
//Bitonic merge
uint ddd = (threadIdx.x & (size / 2)) != 0;
for(uint stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//Odd / even arrays of SHARED_SIZE_LIMIT elements
//sorted in opposite directions
uint ddd = blockIdx.x & 1;
{
for(uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
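// Index math used by the merge kernels: with N/2 comparators per step, comparator i
// handles the pair (pos, pos + stride) where pos = 2*i - (i & (stride - 1)); e.g. for
// stride = 4, comparator 5 touches elements 9 and 13. The (comparatorI & (size / 2)) test
// flips the sort direction in alternating size-blocks so that the sequence stays bitonic
// for the next merge level.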
__global__ void bitonicMergeGlobal(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint stride,
uint dir
){
uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
uint comparatorI = global_comparatorI & (arrayLength / 2 - 1);
//Bitonic merge
uint ddd = dir ^ ( (comparatorI & (size / 2)) != 0 );
uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
uint keyA = d_SrcKey[pos + 0];
uint valA = d_SrcVal[pos + 0];
uint keyB = d_SrcKey[pos + stride];
uint valB = d_SrcVal[pos + stride];
Comparator(
keyA, valA,
keyB, valB,
ddd
);
d_DstKey[pos + 0] = keyA;
d_DstVal[pos + 0] = valA;
d_DstKey[pos + stride] = keyB;
d_DstVal[pos + stride] = valB;
}
//Combined bitonic merge steps for
//size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
){
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ( (comparatorI & (size / 2)) != 0 );
for(uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function (also used by odd-even merge sort)
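// Returns the odd factor of L and stores the exponent of its power-of-two part in *log2L;
// e.g. factorRadix2(&k, 48) returns 3 with k set to 4 (48 = 2^4 * 3), and a return value
// of 1 means L is a power of two.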
extern "C" uint factorRadix2(uint *log2L, uint L){
if(!L){
*log2L = 0;
return 0;
}else{
        for(*log2L = 0; (L & 1) == 0; L >>= 1, (*log2L)++);
return L;
}
}
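// Host-side dispatch: arrays that fit into shared memory are sorted by a single
// bitonicSortShared launch; larger arrays are first sorted in SHARED_SIZE_LIMIT chunks of
// alternating direction and then merged bottom-up, using bitonicMergeGlobal while the
// stride spans more than a chunk and one fused bitonicMergeShared pass once it fits.
// For example (hypothetical sizes), batchSize = 64 with arrayLength = 4096 sorts 64
// independent 4096-element key/value arrays in the direction selected by dir; the return
// value is the thread count per block used for the shared-memory kernels.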
extern "C" uint bitonicSort(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint batchSize,
uint arrayLength,
uint dir
){
//Nothing to sort
if(arrayLength < 2)
return 0;
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert( factorizationRemainder == 1 );
dir = (dir != 0);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
if(arrayLength <= SHARED_SIZE_LIMIT){
assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 );
hipLaunchKernelGGL(( bitonicSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
}else{
hipLaunchKernelGGL(( bitonicSortShared1), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal);
for(uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for(unsigned stride = size / 2; stride > 0; stride >>= 1)
if(stride >= SHARED_SIZE_LIMIT){
hipLaunchKernelGGL(( bitonicMergeGlobal), dim3((batchSize * arrayLength) / 512), dim3(256), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
}else{
hipLaunchKernelGGL(( bitonicMergeShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
| 094b0dd474e9f1808b6df94d051c2948e4aab6bc.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm
#include <assert.h>
#include <cutil_inline.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
){
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for(uint size = 2; size < arrayLength; size <<= 1){
//Bitonic merge
uint ddd = dir ^ ( (threadIdx.x & (size / 2)) != 0 );
for(uint stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for(uint stride = arrayLength / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
//Bottom-level bitonic sort
//Almost the same as bitonicSortShared with the exception of
//even / odd subarrays being sorted in opposite directions
//Bitonic merge accepts both
//Ascending | descending or descending | ascending sorted pairs
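// ddd = blockIdx.x & 1 makes adjacent tiles come out sorted in opposite directions,
// so every pair of neighbouring SHARED_SIZE_LIMIT tiles already forms a bitonic
// sequence for the global merge stages that follow.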
__global__ void bitonicSortShared1(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal
){
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subarray and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for(uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1){
//Bitonic merge
uint ddd = (threadIdx.x & (size / 2)) != 0;
for(uint stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//Odd / even arrays of SHARED_SIZE_LIMIT elements
//sorted in opposite directions
uint ddd = blockIdx.x & 1;
{
for(uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
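// One thread per comparator (a pair of elements `stride` apart), reading and writing
// global memory directly. The host launches (batchSize * arrayLength) / 512 blocks of
// 256 threads, i.e. exactly batchSize * arrayLength / 2 comparators per merge step.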
__global__ void bitonicMergeGlobal(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint stride,
uint dir
){
uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
uint comparatorI = global_comparatorI & (arrayLength / 2 - 1);
//Bitonic merge
uint ddd = dir ^ ( (comparatorI & (size / 2)) != 0 );
uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
uint keyA = d_SrcKey[pos + 0];
uint valA = d_SrcVal[pos + 0];
uint keyB = d_SrcKey[pos + stride];
uint valB = d_SrcVal[pos + stride];
Comparator(
keyA, valA,
keyB, valB,
ddd
);
d_DstKey[pos + 0] = keyA;
d_DstVal[pos + 0] = valA;
d_DstKey[pos + stride] = keyB;
d_DstVal[pos + stride] = valB;
}
//Combined bitonic merge steps for
//size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
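// Once stride drops below SHARED_SIZE_LIMIT, all remaining strides
// (SHARED_SIZE_LIMIT / 2 .. 1) of the current merge step touch only one tile, so they
// are fused into this single shared-memory kernel and the host stride loop breaks.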
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
){
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ( (comparatorI & (size / 2)) != 0 );
for(uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function (also used by odd-even merge sort)
extern "C" uint factorRadix2(uint *log2L, uint L){
if(!L){
*log2L = 0;
return 0;
}else{
for(*log2L = 0; (L & 1) == 0; L >>= 1, (*log2L)++); // increment the pointed-to counter, not the pointer
return L;
}
}
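// factorRadix2 splits L into an odd remainder and a power of two, e.g.
// factorRadix2(&log2L, 1024) -> returns 1, log2L = 10;
// factorRadix2(&log2L, 48) -> returns 3, log2L = 4.
// bitonicSort() below only accepts power-of-two lengths (the remainder must be 1).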
extern "C" uint bitonicSort(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint batchSize,
uint arrayLength,
uint dir
){
//Nothing to sort
if(arrayLength < 2)
return 0;
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert( factorizationRemainder == 1 );
dir = (dir != 0);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
if(arrayLength <= SHARED_SIZE_LIMIT){
assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 );
bitonicSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
}else{
bitonicSortShared1<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal);
for(uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for(unsigned stride = size / 2; stride > 0; stride >>= 1)
if(stride >= SHARED_SIZE_LIMIT){
bitonicMergeGlobal<<<(batchSize * arrayLength) / 512, 256>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
}else{
bitonicMergeShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
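// Illustrative call (buffer names are placeholders, all four allocated on the device):
// sort one batch of 2^20 key/value pairs with dir = 1:
// uint threads = bitonicSort(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, 1, 1U << 20, 1);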
|
3456e5ec0995e268265348b4c9c5493df1f95851.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand_kernel.h>
#define PI 3.141592654f
surface<void, cudaSurfaceType2D> surfaceWrite;
__global__ void kernel(hiprandState_t * randStates) {
//init rand
unsigned long long id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(1000, id, id, &randStates[id]);
//get two uniform random floats
float S1 = hiprand_uniform(&randStates[id]);
float S2 = hiprand_uniform(&randStates[id]);
//now write to output surface
ushort4 output;
output.x = __float2half_rn (S1);
output.y = __float2half_rn(0.5 * sin(2.0 * PI * S2) + 0.5); //scale down to between [0..1]
output.z = __float2half_rn(0.5 * cos(2.0 * PI * S2) + 0.5); //scale down to between [0..1]
output.w = __float2half_rn(S1 * S1);
int x = threadIdx.x;
int y = blockIdx.x;
surf2Dwrite(output, surfaceWrite, x * sizeof(ushort4), y);
}
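// The kernel above writes one texel per thread: four half-precision channels
// (x = S1, y/z = sin/cos of 2*PI*S2 remapped to [0,1], w = S1*S1), stored through a
// ushort4 because __float2half_rn here returns the raw 16-bit half pattern.
// With the <<<width, height>>> launch below, x = threadIdx.x runs over the `height`
// argument and y = blockIdx.x over the `width` argument.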
extern "C" void cudaGenerateNoiseMapKernel(hipArray_t outputArray, unsigned int width, unsigned int height) {
hipError_t err;
hiprandState_t * randStates;
//alloc hiprand states
err = hipMalloc(&randStates, width * height * sizeof(hiprandState_t));
//bind array to global surface
err = hipBindSurfaceToArray(surfaceWrite, outputArray);
//call kernel
kernel << < width, height >> > (randStates);
//clean up
err = hipFree(randStates);
} | 3456e5ec0995e268265348b4c9c5493df1f95851.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#define PI 3.141592654f
surface<void, cudaSurfaceType2D> surfaceWrite;
__global__ void kernel(curandState * randStates) {
//init rand
unsigned long long id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(1000, id, id, &randStates[id]);
//get two uniform random floats
float S1 = curand_uniform(&randStates[id]);
float S2 = curand_uniform(&randStates[id]);
//now write to output surface
ushort4 output;
output.x = __float2half_rn (S1);
output.y = __float2half_rn(0.5 * sin(2.0 * PI * S2) + 0.5); //scale down to between [0..1]
output.z = __float2half_rn(0.5 * cos(2.0 * PI * S2) + 0.5); //scale down to between [0..1]
output.w = __float2half_rn(S1 * S1);
int x = threadIdx.x;
int y = blockIdx.x;
surf2Dwrite(output, surfaceWrite, x * sizeof(ushort4), y);
}
extern "C" void cudaGenerateNoiseMapKernel(cudaArray_t outputArray, unsigned int width, unsigned int height) {
cudaError_t err;
curandState * randStates;
//alloc curand states
err = cudaMalloc(&randStates, width * height * sizeof(curandState));
//bind array to global surface
err = cudaBindSurfaceToArray(surfaceWrite, outputArray);
//call kernel
kernel << < width, height >> > (randStates);
//clean up
err = cudaFree(randStates);
} |
09e7485887e3e47568920f27f0b3728b75c0339f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_hessian_cuda_fermi.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref;
texture<float, hipTextureType1D, hipReadModeElementType> output_tex_ref;
texture<float, hipTextureType1D, hipReadModeElementType> input_squared_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
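// Register-blocking scheme: each thread accumulates BLOCK_SIZE consecutive output
// x positions for FEATURE_MAP_BLOCK_SIZE output feature maps at once (up to
// 5 * 4 = 20 partial sums held in registers). Each input value is fetched once through
// the texture path and reused across the four feature maps, and each weight is reused
// across the BLOCK_SIZE x positions.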
namespace nnforge
{
namespace cuda
{
template<int BLOCK_SIZE>
__global__ void convolution_3d_tex_blocked_hess_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (window_width * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
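// Same computation as above, but with the window width as a template parameter so the
// innermost loop is fully unrolled at compile time (instantiated for widths
// 1..MAX_WINDOW_WIDTH by the launch macros further down).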
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_tex_exact_blocked_hess_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (WINDOW_WIDTH * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
extern __shared__ float arr[];
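// Bias Hessian accumulation: grid.y selects the output feature map, each thread sums
// `block_size` entries' error values for one output neuron into dynamically sized
// shared memory (`arr`, threadblock_size floats passed at launch), the block then
// tree-reduces the partial sums (the (n + 1) / 2 split also handles non-power-of-two
// block sizes) and thread 0 atomically adds the result.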
__global__ void convolution_3d_update_biases_hess_kernel_fermi(
float * __restrict hessian_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_feature_map,
int output_feature_map_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y;
int block_id = blockIdx.z * blockDim.z + threadIdx.z;
int base_entry_id = block_size * block_id;
int thread_id = blockDim.x * threadIdx.z + threadIdx.x;
int threadblock_size = blockDim.x * blockDim.z;
float sum = 0.0F;
int iteration_count = min(entry_count - base_entry_id, block_size);
if (output_neuron_id < output_elem_count_per_feature_map)
{
const float * current_error = output_errors + (base_entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map + output_neuron_id;
int output_elem_count_per_entry = output_elem_count_per_feature_map * output_feature_map_count;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
}
arr[thread_id] = sum;
__syncthreads();
int t_add_elems = threadblock_size >> 1;
int t_working_elems = (threadblock_size + 1) >> 1;
while (t_add_elems > 0)
{
if (thread_id < t_add_elems)
arr[thread_id] += arr[thread_id + t_working_elems];
t_add_elems = t_working_elems >> 1;
t_working_elems = (t_working_elems + 1) >> 1;
__syncthreads();
}
if (thread_id == 0)
atomicAdd(hessian_biases + output_feature_map_id, arr[0]);
}
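// Backward pass of the Hessian estimate: input errors are obtained by convolving the
// output errors with the *squared* weights, consistent with a diagonal
// (Levenberg-Marquardt style) second-derivative approximation. The blocking mirrors
// the forward kernel, but FEATURE_MAP_BLOCK_SIZE now runs over input feature maps.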
template<int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_hess_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights_squared,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights_squared + (int)(weight_count_per_input_feature_map * input_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
current_weights++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
}
}
current_weights++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_exact_hess_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights_squared,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights_squared + (int)(weight_count_per_input_feature_map * input_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
current_weights++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
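// Weight Hessian accumulation: for every (weight position, output row/plane,
// feature-map block) work item, the kernel sums output_error * input_squared over the
// output row and over `block_size` entries, keeping a small sliding buffer of squared
// inputs in registers, and commits the partial sums with atomicAdd.
// input_squared_tex_ref is bound to additional_buffers[0], which
// enqueue_update_hessian fills with the element-wise squared input neurons.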
__global__ void convolution_3d_update_weights_hess_kernel_fermi(
float * __restrict hessian_weights,
const float * __restrict output_errors,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (weight_x < window_width) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
const float * current_output_errors = output_errors + (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_squared_buf[i] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_squared_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width + (window_width - WINDOW_WIDTH_LOCAL);
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j]);
}
}
}
}
template<int WINDOW_WIDTH>
__global__ void convolution_3d_update_weights_exact_hess_kernel_fermi(
float * __restrict hessian_weights,
const float * __restrict output_errors,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int base_entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
const float * current_output_errors = output_errors + (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_squared_buf[i] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_squared_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width;
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j]);
}
}
}
}
convolution_3d_layer_hessian_cuda_fermi::convolution_3d_layer_hessian_cuda_fermi()
{
input_tex_ref.addressMode[0] = hipAddressModeBorder;
input_tex_ref.normalized = false;
output_tex_ref.addressMode[0] = hipAddressModeBorder;
output_tex_ref.normalized = false;
input_squared_tex_ref.addressMode[0] = hipAddressModeBorder;
input_squared_tex_ref.normalized = false;
}
convolution_3d_layer_hessian_cuda_fermi::~convolution_3d_layer_hessian_cuda_fermi()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
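// The launch_* macros below turn the runtime window width (1..MAX_WINDOW_WIDTH) and
// x block size (1..MAX_BLOCK_SIZE) into switches over fully specialized template
// instantiations, so the hot loops are compiled with constant bounds; window widths
// above MAX_WINDOW_WIDTH fall back to the generic kernels.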
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
hipLaunchKernelGGL(( convolution_3d_tex_exact_blocked_hess_kernel_fermi<window_width_const,block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
hipLaunchKernelGGL(( convolution_3d_tex_blocked_hess_kernel_fermi<block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const) \
hipLaunchKernelGGL(( convolution_3d_square_deriviative_tex_exact_hess_kernel_fermi<window_width_const,block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data_squared[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_backprop_kernel_const(block_size_const) \
hipLaunchKernelGGL(( convolution_3d_square_deriviative_tex_hess_kernel_fermi<block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data_squared[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1); \
break; \
case 2: \
launch_backprop_kernel_const(2); \
break; \
case 3: \
launch_backprop_kernel_const(3); \
break; \
case 4: \
launch_backprop_kernel_const(4); \
break; \
case 5: \
launch_backprop_kernel_const(5); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_exact_hess_kernel_fermi<window_width_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *hessian_data[0], *output_errors_buffer, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, block_size, packed_config_count);
#define launch_update_weights_exact_kernel(window_width) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10); \
break; \
};
void convolution_3d_layer_hessian_cuda_fermi::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int packed_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[1]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
forward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size);
}
else
{
launch_kernel(forward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_fermi::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_squared,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
int packed_config_count = input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2]* backward_input_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[3]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
backward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size);
}
else
{
launch_backprop_kernel(backward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_fermi::enqueue_update_hessian(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& hessian_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, input_squared_tex_ref, *additional_buffers[0], desc, input_elem_count_per_entry * entry_count * sizeof(float)));
// Update weights
{
// Store input neurons multiplied element-wise by themselves
cuda_util::multiply_by_itself(
*cuda_config,
*input_neurons_buffer,
*additional_buffers[0],
input_elem_count_per_entry * entry_count,
stream_id);
int block_size = get_weights_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
block_count,
1);
launch_update_weights_exact_kernel(window_sizes[0]);
}
else
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
block_count);
hipLaunchKernelGGL(( convolution_3d_update_weights_hess_kernel_fermi), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*hessian_data[0],
*output_errors_buffer,
packed_config_list,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[2],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
input_configuration_specific.dimension_sizes[2],
window_sizes[0],
window_sizes[1],
window_sizes[2],
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count,
block_size,
packed_config_count);
}
}
// Update biases
{
int block_size = get_bias_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
1,
block_count);
kernel_dims.first.y = output_configuration_specific.feature_map_count;
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
hipLaunchKernelGGL(( convolution_3d_update_biases_hess_kernel_fermi), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*hessian_data[1],
*output_errors_buffer,
block_size,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
}
int convolution_3d_layer_hessian_cuda_fermi::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
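// get_block_size splits a row of `width` outputs into nearly equal chunks of at most
// MAX_BLOCK_SIZE, e.g. width = 11 -> 3 blocks, block size 4 (the kernels mask the
// partial last block with the `j < output_width - x` check).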
void convolution_3d_layer_hessian_cuda_fermi::hessian_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 4> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
size_list[3] = output_configuration_specific.dimension_sizes[1];
space_filling_curve<4>::fill_pattern(size_list, updater_config_ordered_list1);
}
{
std::tr1::array<int, 2> size_list;
size_list[0] = output_configuration_specific.dimension_sizes[2];
size_list[1] = updater_output_feature_map_block_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_hessian_cuda_fermi::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_fermi::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_3d_layer_hessian_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_hessian_cuda_fermi::get_bias_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
int convolution_3d_layer_hessian_cuda_fermi::get_weights_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
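// Both update block sizes are ~sqrt(entry_count), clamped to [1, entry_count]; e.g.
// 100 entries -> blocks of 10, so each thread accumulates 10 entries locally before
// touching the atomics.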
std::vector<size_t> convolution_3d_layer_hessian_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<3>) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<6>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(packed_config<3>) * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count);
}
return res;
}
void convolution_3d_layer_hessian_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(2, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), hipMemcpyHostToDevice));
}
{
std::vector<packed_config<6> > task_list;
packed_config<6> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(4, it2->at(0));
new_elem.set_val(5, it2->at(1) * FEATURE_MAP_BLOCK_SIZE);
for(std::vector<std::tr1::array<int, 4> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
new_elem.set_val(3, it1->at(3));
task_list.push_back(new_elem);
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<6>) * task_list.size(), hipMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(2, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), hipMemcpyHostToDevice));
}
}
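// The packed_config lists built above are uploaded once and let a flat blockIdx.y
// index stand in for multi-dimensional work coordinates inside the kernels
// (y / z / feature-map block for forward and backward, six coordinates for the weight
// update). The 6-component list is ordered via space_filling_curve<>::fill_pattern,
// presumably to keep neighbouring work items close in memory.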
}
}
| 09e7485887e3e47568920f27f0b3728b75c0339f.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_hessian_cuda_fermi.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref;
texture<float, cudaTextureType1D, cudaReadModeElementType> output_tex_ref;
texture<float, cudaTextureType1D, cudaReadModeElementType> input_squared_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
template<int BLOCK_SIZE>
__global__ void convolution_3d_tex_blocked_hess_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (window_width * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_tex_exact_blocked_hess_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = (WINDOW_WIDTH * window_height) * (window_depth * input_feature_map_count);
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int output_feature_map_id = conf.get_val(2);
int input_elem_id = ((entry_id * input_feature_map_count * input_depth + z) * input_height + y) * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_neuron_count_per_feature_map * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
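// Dynamically sized shared memory used by the bias-update kernel below for an in-block
// tree reduction of the per-thread partial sums of output errors.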
extern __shared__ float arr[];
__global__ void convolution_3d_update_biases_hess_kernel_fermi(
float * __restrict hessian_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_feature_map,
int output_feature_map_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y;
int block_id = blockIdx.z * blockDim.z + threadIdx.z;
int base_entry_id = block_size * block_id;
int thread_id = blockDim.x * threadIdx.z + threadIdx.x;
int threadblock_size = blockDim.x * blockDim.z;
float sum = 0.0F;
int iteration_count = min(entry_count - base_entry_id, block_size);
if (output_neuron_id < output_elem_count_per_feature_map)
{
const float * current_error = output_errors + (base_entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map + output_neuron_id;
int output_elem_count_per_entry = output_elem_count_per_feature_map * output_feature_map_count;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
}
arr[thread_id] = sum;
__syncthreads();
int t_add_elems = threadblock_size >> 1;
int t_working_elems = (threadblock_size + 1) >> 1;
while (t_add_elems > 0)
{
if (thread_id < t_add_elems)
arr[thread_id] += arr[thread_id + t_working_elems];
t_add_elems = t_working_elems >> 1;
t_working_elems = (t_working_elems + 1) >> 1;
__syncthreads();
}
if (thread_id == 0)
atomicAdd(hessian_biases + output_feature_map_id, arr[0]);
}
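// Backprop kernels: each thread accumulates a block of input errors by correlating the
// output errors (read through output_tex_ref) with the element-wise squared weights,
// as required for the diagonal Hessian approximation.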
template<int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_hess_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights_squared,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights_squared + (int)(weight_count_per_input_feature_map * input_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
current_weights++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
}
}
current_weights++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_3d_square_deriviative_tex_exact_hess_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights_squared,
const packed_config<3> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int packed_config_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE + (BLOCK_SIZE - 1);
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < input_width + (BLOCK_SIZE - 1)) && (packed_config_id < packed_config_count);
if (in_bounds)
{
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
packed_config<3> conf = packed_config_list[packed_config_id];
int y = conf.get_val(0);
int z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_elem_id = ((entry_id * output_feature_map_count * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights_squared + (int)(weight_count_per_input_feature_map * input_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int output_layer_id = 0; output_layer_id < output_feature_map_count; ++output_layer_id)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
current_weights++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
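// Weight-update kernels: for every weight, accumulate the sum over entries and output
// positions of output_error * input^2 (inputs are read pre-squared through
// input_squared_tex_ref) and add the result into hessian_weights with atomicAdd.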
__global__ void convolution_3d_update_weights_hess_kernel_fermi(
float * __restrict hessian_weights,
const float * __restrict output_errors,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (weight_x < window_width) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
const float * current_output_errors = output_errors + (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_squared_buf[i] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_squared_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width + (window_width - WINDOW_WIDTH_LOCAL);
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j]);
}
}
}
}
template<int WINDOW_WIDTH>
__global__ void convolution_3d_update_weights_exact_hess_kernel_fermi(
float * __restrict hessian_weights,
const float * __restrict output_errors,
const packed_config<6> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int entry_count,
int block_size,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int base_entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * block_size;
bool in_bounds = (packed_config_id < packed_config_count) && (base_entry_id < entry_count);
if (in_bounds)
{
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
packed_config<6> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_y = conf.get_val(3);
int output_z = conf.get_val(4);
int output_feature_map_id = conf.get_val(5);
int iteration_count = min(block_size, entry_count - base_entry_id);
const float * current_output_errors = output_errors + (((base_entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z) * output_height + output_y) * output_width;
int input_elem_id = (((base_entry_id * input_feature_map_count + input_feature_map_id) * input_depth + output_z + weight_z) * input_height + output_y + weight_y) * input_width;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int t = 0; t < iteration_count; ++t)
{
float input_squared_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_squared_buf[i] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_squared_buf[i] = input_squared_buf[i + 1];
input_squared_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_squared_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_squared_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += (output_feature_map_count * output_height * output_depth - 1) * output_width;
input_elem_id += (input_feature_map_count * input_height * input_depth - 1) * input_width;
}
float * base_weights = hessian_weights + (((output_feature_map_id * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(base_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j]);
}
}
}
}
convolution_3d_layer_hessian_cuda_fermi::convolution_3d_layer_hessian_cuda_fermi()
{
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
output_tex_ref.addressMode[0] = cudaAddressModeBorder;
output_tex_ref.normalized = false;
input_squared_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_squared_tex_ref.normalized = false;
}
convolution_3d_layer_hessian_cuda_fermi::~convolution_3d_layer_hessian_cuda_fermi()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
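// The launch_* macros below map the run-time window width and x block size onto
// compile-time template arguments so the inner loops can be fully unrolled;
// window widths above MAX_WINDOW_WIDTH fall back to the generic kernels.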
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
convolution_3d_tex_exact_blocked_hess_kernel_fermi<window_width_const,block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
convolution_3d_tex_blocked_hess_kernel_fermi<block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const) \
convolution_3d_square_deriviative_tex_exact_hess_kernel_fermi<window_width_const,block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data_squared[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_backprop_kernel_const(block_size_const) \
convolution_3d_square_deriviative_tex_hess_kernel_fermi<block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data_squared[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1); \
break; \
case 2: \
launch_backprop_kernel_const(2); \
break; \
case 3: \
launch_backprop_kernel_const(3); \
break; \
case 4: \
launch_backprop_kernel_const(4); \
break; \
case 5: \
launch_backprop_kernel_const(5); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const) \
convolution_3d_update_weights_exact_hess_kernel_fermi<window_width_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*hessian_data[0], *output_errors_buffer, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count, block_size, packed_config_count);
#define launch_update_weights_exact_kernel(window_width) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10); \
break; \
};
void convolution_3d_layer_hessian_cuda_fermi::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int packed_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[1]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
forward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size);
}
else
{
launch_kernel(forward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_fermi::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_squared,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
int packed_config_count = input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2]* backward_input_feature_map_block_count;
const packed_config<3> * packed_config_list = static_cast<const packed_config<3> *>((const void *)*additional_buffers[3]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
backward_x_block_count,
packed_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size);
}
else
{
launch_backprop_kernel(backward_x_block_size);
}
}
void convolution_3d_layer_hessian_cuda_fermi::enqueue_update_hessian(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& hessian_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, input_squared_tex_ref, *additional_buffers[0], desc, input_elem_count_per_entry * entry_count * sizeof(float)));
// Update weights
{
// Store input neurons multiplied element-wise by themselves
cuda_util::multiply_by_itself(
*cuda_config,
*input_neurons_buffer,
*additional_buffers[0],
input_elem_count_per_entry * entry_count,
stream_id);
int block_size = get_weights_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
block_count,
1);
launch_update_weights_exact_kernel(window_sizes[0]);
}
else
{
int packed_config_count = window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count;
const packed_config<6> * packed_config_list = static_cast<const packed_config<6> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
block_count);
convolution_3d_update_weights_hess_kernel_fermi<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*hessian_data[0],
*output_errors_buffer,
packed_config_list,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[2],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
input_configuration_specific.dimension_sizes[2],
window_sizes[0],
window_sizes[1],
window_sizes[2],
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count,
block_size,
packed_config_count);
}
}
// Update biases
{
int block_size = get_bias_update_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
1,
block_count);
kernel_dims.first.y = output_configuration_specific.feature_map_count;
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
convolution_3d_update_biases_hess_kernel_fermi<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*hessian_data[1],
*output_errors_buffer,
block_size,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
}
int convolution_3d_layer_hessian_cuda_fermi::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
void convolution_3d_layer_hessian_cuda_fermi::hessian_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 4> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
size_list[3] = output_configuration_specific.dimension_sizes[1];
space_filling_curve<4>::fill_pattern(size_list, updater_config_ordered_list1);
}
{
std::tr1::array<int, 2> size_list;
size_list[0] = output_configuration_specific.dimension_sizes[2];
size_list[1] = updater_output_feature_map_block_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_hessian_cuda_fermi::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_fermi::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_3d_layer_hessian_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_hessian_cuda_fermi::get_bias_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
int convolution_3d_layer_hessian_cuda_fermi::get_weights_update_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
std::vector<size_t> convolution_3d_layer_hessian_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<3>) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<6>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(packed_config<3>) * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count);
}
return res;
}
void convolution_3d_layer_hessian_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(2, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), cudaMemcpyHostToDevice));
}
{
std::vector<packed_config<6> > task_list;
packed_config<6> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(4, it2->at(0));
new_elem.set_val(5, it2->at(1) * FEATURE_MAP_BLOCK_SIZE);
for(std::vector<std::tr1::array<int, 4> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
new_elem.set_val(3, it1->at(3));
task_list.push_back(new_elem);
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<6>) * task_list.size(), cudaMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<3> > task_list;
packed_config<3> new_elem;
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(2, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(1, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(0, y);
task_list.push_back(new_elem);
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(packed_config<3>) * task_list.size(), cudaMemcpyHostToDevice));
}
}
}
}
|
b800616a4c31cd6edbe264d6a8e5e6c67c8e60d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <cstdio> // printf/scanf/getchar
__global__ void ThirdAngle(int *a1, int *a2, int *a3)
{
*a3 = (180-*a1-*a2);
}
int main()
{
int *a3, *a2, *a1;
int var1, var2, angle3;
printf("Enter angle1\n");
scanf("%d", &var1);
printf("Enter angle2\n");
scanf("%d", &var2);
//Cuda goodness
hipMalloc((void**)&a3, sizeof(int));
hipMalloc((void**)&a2, sizeof(int));
hipMalloc((void**)&a1, sizeof(int));
//Copy read vars to cuda vars
hipMemcpy(a1, &var1, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(a2, &var2, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ThirdAngle), dim3(1),dim3(1), 0, 0, a1, a2, a3);//Run Cuda function on single block
hipMemcpy(&angle3, a3, sizeof(int), hipMemcpyDeviceToHost);//Nab the angle back to angle3
printf("Third Angle:\n %d", angle3);
getchar();
return 0;
} | b800616a4c31cd6edbe264d6a8e5e6c67c8e60d2.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <cstdio> // printf/scanf/getchar
__global__ void ThirdAngle(int *a1, int *a2, int *a3)
{
*a3 = (180-*a1-*a2);
}
int main()
{
int *a3, *a2, *a1;
int var1, var2, angle3;
printf("Enter angle1\n");
scanf("%d", &var1);
printf("Enter angle2\n");
scanf("%d", &var2);
//Cuda goodness
cudaMalloc((void**)&a3, sizeof(int));
cudaMalloc((void**)&a2, sizeof(int));
cudaMalloc((void**)&a1, sizeof(int));
//Copy read vars to cuda vars
cudaMemcpy(a1, &var1, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(a2, &var2, sizeof(int), cudaMemcpyHostToDevice);
ThirdAngle<<<1,1>>>(a1, a2, a3);//Run Cuda function on single block
cudaMemcpy(&angle3, a3, sizeof(int), cudaMemcpyDeviceToHost);//Nab the angle back to angle3
printf("Third Angle:\n %d", angle3);
getchar();
return 0;
} |
b043de37e2ad0164f774af6b4d56601971f7074d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Author: Dai-Ni Hsieh ([email protected])
// Date : 07/09/2020
#include "matvec.h"
#include "constants.h"
void dsum(double *, double *, double *, int);
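// Convert a curve given by landmark positions and per-element vertex indices into its
// varifold representation: for every element (segment) store the midpoint, the unit
// direction, and the segment length.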
__global__ void landmarksToVarifoldKernel(double *d_cenPosMat, double *d_uniDirMat, double *d_elmVolVec,
double *d_lmkPosMat, int *d_elmVtxMat, int lmkNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[elmNum + elmIdx];
vector q0Vec, q1Vec;
getVector(q0Vec, d_lmkPosMat, q0Idx, lmkNum);
getVector(q1Vec, d_lmkPosMat, q1Idx, lmkNum);
vector cenVec, dirVec;
vectorAverage(cenVec, q0Vec, q1Vec);
vectorSubtract(dirVec, q1Vec, q0Vec);
double elmVol = eucnorm(dirVec);
dirVec.x /= elmVol;
dirVec.y /= elmVol;
setVector(d_cenPosMat, cenVec, elmIdx, elmNum);
setVector(d_uniDirMat, dirVec, elmIdx, elmNum);
d_elmVolVec[elmIdx] = elmVol;
}
return;
}
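// Spatial kernel between element centers: 'G' selects a Gaussian, 'C' a Cauchy kernel.
// The overload taking d1KVec additionally returns the gradient with respect to the
// first center c1Vec.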
__device__ void geometricFunction(double &knlVal, vector c1Vec, vector c2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'G' ) // gaussian
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = exp(-dstSqu / (knlWidth * knlWidth));
return;
}
if ( knlType == 'C' ) // cauchy
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = 1.0 / (1.0 + dstSqu / (knlWidth * knlWidth));
return;
}
return;
}
__device__ void geometricFunction(double &knlVal, vector &d1KVec, vector c1Vec, vector c2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'G' ) // gaussian
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = exp(-dstSqu / (knlWidth * knlWidth));
double d1KVal = -2.0 * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal * (c1Vec.x - c2Vec.x);
d1KVec.y = d1KVal * (c1Vec.y - c2Vec.y);
return;
}
if ( knlType == 'C' ) // cauchy
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = 1.0 / (1.0 + dstSqu / (knlWidth * knlWidth));
double d1KVal = -2.0 * knlVal * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal * (c1Vec.x - c2Vec.x);
d1KVec.y = d1KVal * (c1Vec.y - c2Vec.y);
return;
}
return;
}
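// Angular kernel between unit directions: 'B' Binet, 'L' linear, 'O' oriented Gaussian,
// 'U' unoriented Gaussian. The overload taking d1KVec also returns the derivative with
// respect to the first direction before normalization, hence the division by v1Vol.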
__device__ void grassmanFunction(double &knlVal, vector v1Vec, vector v2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'B' ) // binet
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal * angVal;
return;
}
if ( knlType == 'L' ) // linear
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal;
return;
}
if ( knlType == 'O' ) // gaussian oriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal - 1.0) / (knlWidth * knlWidth));
return;
}
if ( knlType == 'U' ) // gaussian unoriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal * angVal - 1.0) / (knlWidth * knlWidth));
return;
}
return;
}
__device__ void grassmanFunction(double &knlVal, vector &d1KVec, vector v1Vec, vector v2Vec,
char knlType, double knlWidth, double v1Vol)
{
if ( knlType == 'B' ) // binet
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal * angVal;
double d1KVal = 2.0 * angVal;
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'L' ) // linear
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal;
d1KVec.x = 1.0 / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = 1.0 / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'O' ) // gaussian oriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal - 1.0) / (knlWidth * knlWidth));
double d1KVal = 2.0 * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'U' ) // gaussian unoriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal * angVal - 1.0) / (knlWidth * knlWidth));
double d1KVal = 4.0 * angVal * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
return;
}
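// Varifold distance kernels: the squared distance expands into <D,D> - 2<D,T> + <T,T>.
// Each thread computes one element's contribution to the terms handled by the kernel and
// writes the partial sum to d_vfdVec, which is reduced to a scalar afterwards.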
__global__ void vfd_DD_DT_Kernel(double *d_vfdVec,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
}
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * dfmElmiVol * tgtElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_TT_Kernel(double *d_vfdVec, double *d_tgtCenPosMat, double *d_tgtUniDirMat,
double *d_tgtElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_TT_TD_Kernel(double *d_vfdVec,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * tgtElmiVol * dfmElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_DD_Kernel(double *d_vfdVec, double *d_dfmCenPosMat, double *d_dfmUniDirMat,
double *d_dfmElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int dfmElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
}
return;
}
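// Gradient kernels: along with the distance contribution, accumulate the derivative of the
// varifold energy with respect to each deformed element's center (d_dcVfdMat) and
// unnormalized direction q1 - q0 (d_ddVfdMat); the factors of 2 come from the symmetry of
// the <D,D> term and the -2 coefficient of the cross term.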
__global__ void dqVfd_DD_DT_Kernel(double *d_vfdVec, double *d_dcVfdMat, double *d_ddVfdMat,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dciVfdVec = {0.0, 0.0};
vector ddiVfdVec = {0.0, 0.0};
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
vector dciKnlVec, ddiKnlVec;
geometricFunction(cenKnlVal, dciKnlVec, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth );
grassmanFunction(dirKnlVal, ddiKnlVec, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth, dfmElmiVol);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
dciVfdVec.x += 2.0 * dciKnlVec.x * dirKnlVal * dfmElmiVol * dfmElmjVol;
dciVfdVec.y += 2.0 * dciKnlVec.y * dirKnlVal * dfmElmiVol * dfmElmjVol;
ddiVfdVec.x += 2.0 * cenKnlVal * ( ddiKnlVec.x * dfmElmiVol
+ dirKnlVal * dfmDiriVec.x ) * dfmElmjVol;
ddiVfdVec.y += 2.0 * cenKnlVal * ( ddiKnlVec.y * dfmElmiVol
+ dirKnlVal * dfmDiriVec.y ) * dfmElmjVol;
}
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
vector dciKnlVec, ddiKnlVec;
geometricFunction(cenKnlVal, dciKnlVec, dfmCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth );
grassmanFunction(dirKnlVal, ddiKnlVec, dfmDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth, dfmElmiVol);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * dfmElmiVol * tgtElmjVol;
dciVfdVec.x -= 2.0 * dciKnlVec.x * dirKnlVal * dfmElmiVol * tgtElmjVol;
dciVfdVec.y -= 2.0 * dciKnlVec.y * dirKnlVal * dfmElmiVol * tgtElmjVol;
ddiVfdVec.x -= 2.0 * cenKnlVal * ( ddiKnlVec.x * dfmElmiVol
+ dirKnlVal * dfmDiriVec.x ) * tgtElmjVol;
ddiVfdVec.y -= 2.0 * cenKnlVal * ( ddiKnlVec.y * dfmElmiVol
+ dirKnlVal * dfmDiriVec.y ) * tgtElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
setVector(d_dcVfdMat, dciVfdVec, dfmElmiIdx, dfmElmNum);
setVector(d_ddVfdMat, ddiVfdVec, dfmElmiIdx, dfmElmNum);
}
return;
}
__global__ void dqVfd_TT_Kernel(double *d_vfdVec, double *d_tgtCenPosMat, double *d_tgtUniDirMat,
double *d_tgtElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
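// Gather per-element derivatives back to per-landmark gradients: d_dfmElmIfoMat stores,
// for each landmark, the number of adjacent elements followed by (element index, sign)
// pairs. Each endpoint receives half of the center derivative (the center is the midpoint
// of the two endpoints) and a signed copy of the edge derivative (edge = q1 - q0).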
__global__ void dqVfdGatherKernel(double *d_dqVfdMat, double *d_dcVfdMat, double *d_ddVfdMat,
int *d_dfmElmIfoMat, int dfmElmNum, int dfmLmkNum)
{
int dfmLmkIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmLmkIdx < dfmLmkNum )
{
vector dqVfdVec = {0.0, 0.0};
int adjNum = d_dfmElmIfoMat[dfmLmkIdx];
for ( int adjIdx = 0; adjIdx < adjNum; ++adjIdx )
{
int elmIdx = d_dfmElmIfoMat[(1 + 2 * adjIdx ) * dfmLmkNum + dfmLmkIdx];
int sgnInt = d_dfmElmIfoMat[(1 + 2 * adjIdx + 1) * dfmLmkNum + dfmLmkIdx];
vector dcVfdVec, ddVfdVec;
getVector(dcVfdVec, d_dcVfdMat, elmIdx, dfmElmNum);
getVector(ddVfdVec, d_ddVfdMat, elmIdx, dfmElmNum);
dqVfdVec.x += 0.5 * dcVfdVec.x + sgnInt * ddVfdVec.x;
dqVfdVec.y += 0.5 * dcVfdVec.y + sgnInt * ddVfdVec.y;
}
setVector(d_dqVfdMat, dqVfdVec, dfmLmkIdx, dfmLmkNum);
}
return;
}
void varifold(double *h_vfdPtr, double *d_dfmLmkPosMat, int *d_dfmElmVtxMat,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_vfdVec, double *d_sumBufVec,
int dfmLmkNum, int dfmElmNum, int tgtElmNum)
{
int blkNum = (dfmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( landmarksToVarifoldKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_dfmLmkPosMat, d_dfmElmVtxMat, dfmLmkNum, dfmElmNum);
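// The O(Ni*Nj) kernels below write one partial sum per element into d_vfdVec. The cross
// (deformed-target) term is attached to whichever element set is larger, so the heavier
// kernel is parallelized over more threads; dsum then reduces all dfmElmNum + tgtElmNum
// partial sums into the scalar varifold distance.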
if ( dfmElmNum >= tgtElmNum )
{
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vfd_DD_DT_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vfd_TT_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec + dfmElmNum,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, tgtElmNum);
}
else
{
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vfd_TT_TD_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vfd_DD_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec + tgtElmNum,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, dfmElmNum);
}
dsum(h_vfdPtr, d_vfdVec, d_sumBufVec, dfmElmNum + tgtElmNum);
return;
}
void varifold(double *h_vfdPtr, double *d_dqVfdMat,
double *d_dfmLmkPosMat, int *d_dfmElmVtxMat, int *d_dfmElmIfoMat,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_vfdVec, double *d_sumBufVec, double *d_dcVfdMat, double *d_ddVfdMat,
int dfmLmkNum, int dfmElmNum, int tgtElmNum)
{
int blkNum = (dfmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( landmarksToVarifoldKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_dfmLmkPosMat, d_dfmElmVtxMat, dfmLmkNum, dfmElmNum);
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( dqVfd_DD_DT_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec, d_dcVfdMat, d_ddVfdMat,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( dqVfd_TT_Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_vfdVec + dfmElmNum,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, tgtElmNum);
dsum(h_vfdPtr, d_vfdVec, d_sumBufVec, dfmElmNum + tgtElmNum);
blkNum = (dfmLmkNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( dqVfdGatherKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqVfdMat, d_dcVfdMat, d_ddVfdMat,
d_dfmElmIfoMat, dfmElmNum, dfmLmkNum);
return;
}
| b043de37e2ad0164f774af6b4d56601971f7074d.cu | // Author: Dai-Ni Hsieh ([email protected])
// Date : 07/09/2020
#include "matvec.h"
#include "constants.h"
void dsum(double *, double *, double *, int);
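// Convert each line element (a pair of landmark indices from d_elmVtxMat) into its
// varifold representation: the midpoint (center), the unit direction, and the segment
// length, which is stored as the element "volume".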
__global__ void landmarksToVarifoldKernel(double *d_cenPosMat, double *d_uniDirMat, double *d_elmVolVec,
double *d_lmkPosMat, int *d_elmVtxMat, int lmkNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[elmNum + elmIdx];
vector q0Vec, q1Vec;
getVector(q0Vec, d_lmkPosMat, q0Idx, lmkNum);
getVector(q1Vec, d_lmkPosMat, q1Idx, lmkNum);
vector cenVec, dirVec;
vectorAverage(cenVec, q0Vec, q1Vec);
vectorSubtract(dirVec, q1Vec, q0Vec);
double elmVol = eucnorm(dirVec);
dirVec.x /= elmVol;
dirVec.y /= elmVol;
setVector(d_cenPosMat, cenVec, elmIdx, elmNum);
setVector(d_uniDirMat, dirVec, elmIdx, elmNum);
d_elmVolVec[elmIdx] = elmVol;
}
return;
}
__device__ void geometricFunction(double &knlVal, vector c1Vec, vector c2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'G' ) // gaussian
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = exp(-dstSqu / (knlWidth * knlWidth));
return;
}
if ( knlType == 'C' ) // cauchy
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = 1.0 / (1.0 + dstSqu / (knlWidth * knlWidth));
return;
}
return;
}
__device__ void geometricFunction(double &knlVal, vector &d1KVec, vector c1Vec, vector c2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'G' ) // gaussian
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = exp(-dstSqu / (knlWidth * knlWidth));
double d1KVal = -2.0 * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal * (c1Vec.x - c2Vec.x);
d1KVec.y = d1KVal * (c1Vec.y - c2Vec.y);
return;
}
if ( knlType == 'C' ) // cauchy
{
double dstSqu = eucdistSqu(c1Vec, c2Vec);
knlVal = 1.0 / (1.0 + dstSqu / (knlWidth * knlWidth));
double d1KVal = -2.0 * knlVal * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal * (c1Vec.x - c2Vec.x);
d1KVec.y = d1KVal * (c1Vec.y - c2Vec.y);
return;
}
return;
}
__device__ void grassmanFunction(double &knlVal, vector v1Vec, vector v2Vec,
char knlType, double knlWidth)
{
if ( knlType == 'B' ) // binet
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal * angVal;
return;
}
if ( knlType == 'L' ) // linear
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal;
return;
}
if ( knlType == 'O' ) // gaussian oriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal - 1.0) / (knlWidth * knlWidth));
return;
}
if ( knlType == 'U' ) // gaussian unoriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal * angVal - 1.0) / (knlWidth * knlWidth));
return;
}
return;
}
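// Overload that also returns the derivative of the direction kernel with respect to the
// unnormalized edge vector of the first element; dividing by v1Vol = |edge| applies the
// chain rule for the normalization v1 = edge / |edge|.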
__device__ void grassmanFunction(double &knlVal, vector &d1KVec, vector v1Vec, vector v2Vec,
char knlType, double knlWidth, double v1Vol)
{
if ( knlType == 'B' ) // binet
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal * angVal;
double d1KVal = 2.0 * angVal;
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'L' ) // linear
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = angVal;
d1KVec.x = 1.0 / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = 1.0 / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'O' ) // gaussian oriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal - 1.0) / (knlWidth * knlWidth));
double d1KVal = 2.0 * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
if ( knlType == 'U' ) // gaussian unoriented
{
double angVal = dotProduct(v1Vec, v2Vec);
knlVal = exp(2.0 * (angVal * angVal - 1.0) / (knlWidth * knlWidth));
double d1KVal = 4.0 * angVal * knlVal / (knlWidth * knlWidth);
d1KVec.x = d1KVal / v1Vol * (-angVal * v1Vec.x + v2Vec.x);
d1KVec.y = d1KVal / v1Vol * (-angVal * v1Vec.y + v2Vec.y);
return;
}
return;
}
__global__ void vfd_DD_DT_Kernel(double *d_vfdVec,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
}
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * dfmElmiVol * tgtElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_TT_Kernel(double *d_vfdVec, double *d_tgtCenPosMat, double *d_tgtUniDirMat,
double *d_tgtElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_TT_TD_Kernel(double *d_vfdVec,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * tgtElmiVol * dfmElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
__global__ void vfd_DD_Kernel(double *d_vfdVec, double *d_dfmCenPosMat, double *d_dfmUniDirMat,
double *d_dfmElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int dfmElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
}
return;
}
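// Gradient kernel: for each deformed element i, accumulate its share of the varifold
// energy (DD terms minus 2x the DT cross terms) together with the derivatives of the
// energy with respect to the element center (d_dcVfdMat) and with respect to the
// unnormalized edge/direction vector (d_ddVfdMat).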
__global__ void dqVfd_DD_DT_Kernel(double *d_vfdVec, double *d_dcVfdMat, double *d_ddVfdMat,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
int dfmElmNum, int tgtElmNum)
{
int dfmElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmElmiIdx < dfmElmNum )
{
double vfdVal = 0.0;
vector dciVfdVec = {0.0, 0.0};
vector ddiVfdVec = {0.0, 0.0};
vector dfmCeniVec, dfmDiriVec;
getVector(dfmCeniVec, d_dfmCenPosMat, dfmElmiIdx, dfmElmNum);
getVector(dfmDiriVec, d_dfmUniDirMat, dfmElmiIdx, dfmElmNum);
double dfmElmiVol = d_dfmElmVolVec[dfmElmiIdx];
for ( int dfmElmjIdx = 0; dfmElmjIdx < dfmElmNum; ++dfmElmjIdx )
{
vector dfmCenjVec, dfmDirjVec;
getVector(dfmCenjVec, d_dfmCenPosMat, dfmElmjIdx, dfmElmNum);
getVector(dfmDirjVec, d_dfmUniDirMat, dfmElmjIdx, dfmElmNum);
double dfmElmjVol = d_dfmElmVolVec[dfmElmjIdx];
double cenKnlVal, dirKnlVal;
vector dciKnlVec, ddiKnlVec;
geometricFunction(cenKnlVal, dciKnlVec, dfmCeniVec, dfmCenjVec, cenKnlType, cenKnlWidth );
grassmanFunction(dirKnlVal, ddiKnlVec, dfmDiriVec, dfmDirjVec, dirKnlType, dirKnlWidth, dfmElmiVol);
vfdVal += cenKnlVal * dirKnlVal * dfmElmiVol * dfmElmjVol;
dciVfdVec.x += 2.0 * dciKnlVec.x * dirKnlVal * dfmElmiVol * dfmElmjVol;
dciVfdVec.y += 2.0 * dciKnlVec.y * dirKnlVal * dfmElmiVol * dfmElmjVol;
ddiVfdVec.x += 2.0 * cenKnlVal * ( ddiKnlVec.x * dfmElmiVol
+ dirKnlVal * dfmDiriVec.x ) * dfmElmjVol;
ddiVfdVec.y += 2.0 * cenKnlVal * ( ddiKnlVec.y * dfmElmiVol
+ dirKnlVal * dfmDiriVec.y ) * dfmElmjVol;
}
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
vector dciKnlVec, ddiKnlVec;
geometricFunction(cenKnlVal, dciKnlVec, dfmCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth );
grassmanFunction(dirKnlVal, ddiKnlVec, dfmDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth, dfmElmiVol);
vfdVal -= 2.0 * cenKnlVal * dirKnlVal * dfmElmiVol * tgtElmjVol;
dciVfdVec.x -= 2.0 * dciKnlVec.x * dirKnlVal * dfmElmiVol * tgtElmjVol;
dciVfdVec.y -= 2.0 * dciKnlVec.y * dirKnlVal * dfmElmiVol * tgtElmjVol;
ddiVfdVec.x -= 2.0 * cenKnlVal * ( ddiKnlVec.x * dfmElmiVol
+ dirKnlVal * dfmDiriVec.x ) * tgtElmjVol;
ddiVfdVec.y -= 2.0 * cenKnlVal * ( ddiKnlVec.y * dfmElmiVol
+ dirKnlVal * dfmDiriVec.y ) * tgtElmjVol;
}
d_vfdVec[dfmElmiIdx] = vfdVal;
setVector(d_dcVfdMat, dciVfdVec, dfmElmiIdx, dfmElmNum);
setVector(d_ddVfdMat, ddiVfdVec, dfmElmiIdx, dfmElmNum);
}
return;
}
__global__ void dqVfd_TT_Kernel(double *d_vfdVec, double *d_tgtCenPosMat, double *d_tgtUniDirMat,
double *d_tgtElmVolVec, char cenKnlType, double cenKnlWidth,
char dirKnlType, double dirKnlWidth, int tgtElmNum)
{
int tgtElmiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( tgtElmiIdx < tgtElmNum )
{
double vfdVal = 0.0;
vector tgtCeniVec, tgtDiriVec;
getVector(tgtCeniVec, d_tgtCenPosMat, tgtElmiIdx, tgtElmNum);
getVector(tgtDiriVec, d_tgtUniDirMat, tgtElmiIdx, tgtElmNum);
double tgtElmiVol = d_tgtElmVolVec[tgtElmiIdx];
for ( int tgtElmjIdx = 0; tgtElmjIdx < tgtElmNum; ++tgtElmjIdx )
{
vector tgtCenjVec, tgtDirjVec;
getVector(tgtCenjVec, d_tgtCenPosMat, tgtElmjIdx, tgtElmNum);
getVector(tgtDirjVec, d_tgtUniDirMat, tgtElmjIdx, tgtElmNum);
double tgtElmjVol = d_tgtElmVolVec[tgtElmjIdx];
double cenKnlVal, dirKnlVal;
geometricFunction(cenKnlVal, tgtCeniVec, tgtCenjVec, cenKnlType, cenKnlWidth);
grassmanFunction(dirKnlVal, tgtDiriVec, tgtDirjVec, dirKnlType, dirKnlWidth);
vfdVal += cenKnlVal * dirKnlVal * tgtElmiVol * tgtElmjVol;
}
d_vfdVec[tgtElmiIdx] = vfdVal;
}
return;
}
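// Gather per-element derivatives back to per-landmark gradients: d_dfmElmIfoMat stores,
// for each landmark, the number of adjacent elements followed by (element index, sign)
// pairs. Each endpoint receives half of the center derivative (the center is the midpoint
// of the two endpoints) and a signed copy of the edge derivative (edge = q1 - q0).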
__global__ void dqVfdGatherKernel(double *d_dqVfdMat, double *d_dcVfdMat, double *d_ddVfdMat,
int *d_dfmElmIfoMat, int dfmElmNum, int dfmLmkNum)
{
int dfmLmkIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( dfmLmkIdx < dfmLmkNum )
{
vector dqVfdVec = {0.0, 0.0};
int adjNum = d_dfmElmIfoMat[dfmLmkIdx];
for ( int adjIdx = 0; adjIdx < adjNum; ++adjIdx )
{
int elmIdx = d_dfmElmIfoMat[(1 + 2 * adjIdx ) * dfmLmkNum + dfmLmkIdx];
int sgnInt = d_dfmElmIfoMat[(1 + 2 * adjIdx + 1) * dfmLmkNum + dfmLmkIdx];
vector dcVfdVec, ddVfdVec;
getVector(dcVfdVec, d_dcVfdMat, elmIdx, dfmElmNum);
getVector(ddVfdVec, d_ddVfdMat, elmIdx, dfmElmNum);
dqVfdVec.x += 0.5 * dcVfdVec.x + sgnInt * ddVfdVec.x;
dqVfdVec.y += 0.5 * dcVfdVec.y + sgnInt * ddVfdVec.y;
}
setVector(d_dqVfdMat, dqVfdVec, dfmLmkIdx, dfmLmkNum);
}
return;
}
void varifold(double *h_vfdPtr, double *d_dfmLmkPosMat, int *d_dfmElmVtxMat,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_vfdVec, double *d_sumBufVec,
int dfmLmkNum, int dfmElmNum, int tgtElmNum)
{
int blkNum = (dfmElmNum - 1) / BLKDIM + 1;
landmarksToVarifoldKernel <<<blkNum, BLKDIM>>> (d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_dfmLmkPosMat, d_dfmElmVtxMat, dfmLmkNum, dfmElmNum);
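// The O(Ni*Nj) kernels below write one partial sum per element into d_vfdVec. The cross
// (deformed-target) term is attached to whichever element set is larger, so the heavier
// kernel is parallelized over more threads; dsum then reduces all dfmElmNum + tgtElmNum
// partial sums into the scalar varifold distance.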
if ( dfmElmNum >= tgtElmNum )
{
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
vfd_DD_DT_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
vfd_TT_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec + dfmElmNum,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, tgtElmNum);
}
else
{
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
vfd_TT_TD_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
vfd_DD_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec + tgtElmNum,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, dfmElmNum);
}
dsum(h_vfdPtr, d_vfdVec, d_sumBufVec, dfmElmNum + tgtElmNum);
return;
}
void varifold(double *h_vfdPtr, double *d_dqVfdMat,
double *d_dfmLmkPosMat, int *d_dfmElmVtxMat, int *d_dfmElmIfoMat,
double *d_tgtCenPosMat, double *d_tgtUniDirMat, double *d_tgtElmVolVec,
char cenKnlType, double cenKnlWidth, char dirKnlType, double dirKnlWidth,
double *d_dfmCenPosMat, double *d_dfmUniDirMat, double *d_dfmElmVolVec,
double *d_vfdVec, double *d_sumBufVec, double *d_dcVfdMat, double *d_ddVfdMat,
int dfmLmkNum, int dfmElmNum, int tgtElmNum)
{
int blkNum = (dfmElmNum - 1) / BLKDIM + 1;
landmarksToVarifoldKernel <<<blkNum, BLKDIM>>> (d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_dfmLmkPosMat, d_dfmElmVtxMat, dfmLmkNum, dfmElmNum);
blkNum = (dfmElmNum - 1) / BLKDIM + 1;
dqVfd_DD_DT_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec, d_dcVfdMat, d_ddVfdMat,
d_dfmCenPosMat, d_dfmUniDirMat, d_dfmElmVolVec,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth,
dfmElmNum, tgtElmNum);
blkNum = (tgtElmNum - 1) / BLKDIM + 1;
dqVfd_TT_Kernel <<<blkNum, BLKDIM>>> (d_vfdVec + dfmElmNum,
d_tgtCenPosMat, d_tgtUniDirMat, d_tgtElmVolVec,
cenKnlType, cenKnlWidth, dirKnlType, dirKnlWidth, tgtElmNum);
dsum(h_vfdPtr, d_vfdVec, d_sumBufVec, dfmElmNum + tgtElmNum);
blkNum = (dfmLmkNum - 1) / BLKDIM + 1;
dqVfdGatherKernel <<<blkNum, BLKDIM>>> (d_dqVfdMat, d_dcVfdMat, d_ddVfdMat,
d_dfmElmIfoMat, dfmElmNum, dfmLmkNum);
return;
}
|
e0f46372c6ea8a6f3f4cbdcd6f99866fd94a241d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
#include "func.h"
#include <stdio.h>
#include <time.h>
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
int main( void ) {
clock_t begin, end;
double time_spent;
begin = clock();
FILE *myfile;
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, thresh;
int numseg;
printf("%s", "What is your I max? ");
scanf("%lf", &imax);
printf("%s", "What is the length of your rod? ");
scanf("%lf", &rlength);
printf("%s", "What is eta? ");
scanf("%lf", &eta);
printf("%s", "How many segments would you like? ");
scanf("%d", &numseg);
ldr = rlength/(numseg+1);
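// Stability bound of the explicit (FTCS) diffusion update: with diffusivity eta/mu0,
// the scheme requires dt < 0.5 * mu0 * dr^2 / eta, which is the limit shown in the prompt.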
double bound = 0.5*ldr*ldr*mu0/eta;
printf("%s%lf%s", "What time step would you like? (must be less than ", bound, " ) ");
scanf("%lf", &tstep);
printf("%s", "What is the threshold for your convergence? ");
scanf("%lf", &thresh);
//initialize
double *rod_new = new double[numseg+2];
bool *conv = new bool[numseg];
double *dev_old, *dev_new;
bool *dev_conv;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_old, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_new, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_conv, numseg * sizeof(bool) ) );
// fill the array 'new'
hipLaunchKernelGGL(( init), dim3(numseg+2),dim3(1), 0, 0, dev_new, imax, ldr, rlength, numseg+2);
// copy data on device from 'dev_new' to 'rod_new'
HANDLE_ERROR( hipMemcpy( rod_new, dev_new, (numseg+2) * sizeof(double), hipMemcpyDeviceToHost ) );
int out;
// output r values
for (out = 0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
double aug = eta*tstep/(mu0*ldr*ldr);
int tcount = 0;
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
do{
/*if(tcount%10000==0){
printf("\ntimestep %d", tcount);
HANDLE_ERROR( hipMemcpy( rod_new + 1, dev_new + 1, numseg * sizeof(double), hipMemcpyDeviceToHost ) );
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
}*/
tcount++;
//copy new to old
hipLaunchKernelGGL(( copyToOld), dim3(numseg+2),dim3(1), 0, 0, dev_new, dev_old, numseg+2);
//update
hipLaunchKernelGGL(( update), dim3(numseg),dim3(1), 0, 0, dev_new, dev_old, numseg+1, dev_conv, thresh, aug);
HANDLE_ERROR( hipMemcpy( conv, dev_conv, numseg, hipMemcpyDeviceToHost ) );
} while(!converge(conv, numseg));
// copy the converged solution back to the host before freeing device memory
HANDLE_ERROR( hipMemcpy( rod_new, dev_new, (numseg+2) * sizeof(double), hipMemcpyDeviceToHost ) );
// free the memory allocated on the GPU
HANDLE_ERROR( hipFree( dev_old ) );
HANDLE_ERROR( hipFree( dev_new ) );
HANDLE_ERROR( hipFree( dev_conv ) );
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\n------------------------------------\n");
printf("Execution took: %lf sec\n", time_spent);
return 0;
}
| e0f46372c6ea8a6f3f4cbdcd6f99866fd94a241d.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
#include "func.h"
#include <stdio.h>
#include <time.h>
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
int main( void ) {
clock_t begin, end;
double time_spent;
begin = clock();
FILE *myfile;
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, thresh;
int numseg;
printf("%s", "What is your I max? ");
scanf("%lf", &imax);
printf("%s", "What is the length of your rod? ");
scanf("%lf", &rlength);
printf("%s", "What is eta? ");
scanf("%lf", &eta);
printf("%s", "How many segments would you like? ");
scanf("%d", &numseg);
ldr = rlength/(numseg+1);
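// Stability bound of the explicit (FTCS) diffusion update: with diffusivity eta/mu0,
// the scheme requires dt < 0.5 * mu0 * dr^2 / eta, which is the limit shown in the prompt.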
double bound = 0.5*ldr*ldr*mu0/eta;
printf("%s%lf%s", "What time step would you like? (must be less than ", bound, " ) ");
scanf("%lf", &tstep);
printf("%s", "What is the threshold for your convergence? ");
scanf("%lf", &thresh);
//initialize
double *rod_new = new double[numseg+2];
bool *conv = new bool[numseg];
double *dev_old, *dev_new;
bool *dev_conv;
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_old, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_new, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_conv, numseg * sizeof(bool) ) );
// fill the array 'new'
init<<<numseg+2,1>>>(dev_new, imax, ldr, rlength, numseg+2);
// copy data on device from 'dev_new' to 'rod_new'
HANDLE_ERROR( cudaMemcpy( rod_new, dev_new, (numseg+2) * sizeof(double), cudaMemcpyDeviceToHost ) );
int out;
// output r values
for (out = 0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
double aug = eta*tstep/(mu0*ldr*ldr);
int tcount = 0;
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
do{
/*if(tcount%10000==0){
printf("\ntimestep %d", tcount);
HANDLE_ERROR( cudaMemcpy( rod_new + 1, dev_new + 1, numseg * sizeof(double), cudaMemcpyDeviceToHost ) );
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
}*/
tcount++;
//copy new to old
copyToOld<<<numseg+2,1>>>(dev_new, dev_old, numseg+2);
//update
update<<<numseg,1>>>(dev_new, dev_old, numseg+1, dev_conv, thresh, aug);
HANDLE_ERROR( cudaMemcpy( conv, dev_conv, numseg, cudaMemcpyDeviceToHost ) );
} while(!converge(conv, numseg));
// copy the converged solution back to the host before freeing device memory
HANDLE_ERROR( cudaMemcpy( rod_new, dev_new, (numseg+2) * sizeof(double), cudaMemcpyDeviceToHost ) );
// free the memory allocated on the GPU
HANDLE_ERROR( cudaFree( dev_old ) );
HANDLE_ERROR( cudaFree( dev_new ) );
HANDLE_ERROR( cudaFree( dev_conv ) );
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\n------------------------------------\n");
printf("Execution took: %lf sec\n", time_spent);
return 0;
}
|
653c0632fe4c8c629d759b2b1288e52f87048939.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/landmark_pair_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
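// Inter-ocular distance for the 68-point facial landmark convention: points 36-41 and
// 42-47 are the two eye contours; each contour is averaged to an eye center and the
// Euclidean distance between the two centers is used to normalize the loss.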
template <typename Dtype>
__global__ void calc_eye_dist_68_kernel(const int N, const int num_landmark, const Dtype *gtLandmarks, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
const Dtype *gtLandmark = gtLandmarks + index * num_landmark * 2;
Dtype left_eye_x = 0, left_eye_y = 0;
Dtype right_eye_x = 0, right_eye_y = 0;
for (int i = 36; i < 42; ++i) {
left_eye_x += gtLandmark[2 * i + 0];
left_eye_y += gtLandmark[2 * i + 1];
}
left_eye_x /= 6;
left_eye_y /= 6;
for (int i = 42; i < 48; ++i) {
right_eye_x += gtLandmark[2 * i + 0];
right_eye_y += gtLandmark[2 * i + 1];
}
right_eye_x /= 6;
right_eye_y /= 6;
Dtype dx = left_eye_x - right_eye_x;
Dtype dy = left_eye_y - right_eye_y;
dist[index] = sqrt(dx*dx + dy*dy);
}
}
template <typename Dtype>
__global__ void calc_landmark_dist_kernel(const int N, const int num_landmark, const Dtype *diff, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
int iBatch = index / num_landmark;
int iLandmark = index % num_landmark;
const Dtype *diff2 = diff + iBatch * num_landmark * 2 + iLandmark * 2;
dist[index] = sqrt(diff2[0] + diff2[1]);
}
}
template <typename Dtype>
__global__ void calc_single_landmark_error_kernel(const int N, const int num_landmark, const Dtype *diff, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
int iBatch = index / num_landmark;
int iLandmark = index % num_landmark;
const Dtype *diff2 = diff + iBatch * num_landmark * 2 + iLandmark * 2;
dist[index] = sqrt(diff2[0] + diff2[1]);
}
}
template <typename Dtype>
void LandmarkPairLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int num = bottom[0]->num();
const int landmark_dim = 2 * num_landmark_;
// Compute the difference and the squared difference between the predicted and ground-truth landmarks
caffe_gpu_sub(bottom[0]->count(), bottom[0]->gpu_data(), bottom[1]->gpu_data(), tmp_diff.mutable_gpu_data());
caffe_gpu_mul(tmp_diff.count(), tmp_diff.gpu_data(), tmp_diff.gpu_data(), tmp_diff.mutable_gpu_diff());
// Compute the per-landmark distance between the predicted and ground-truth landmarks
calc_landmark_dist_kernel<Dtype> << <CAFFE_GET_BLOCKS(num*num_landmark_), CAFFE_CUDA_NUM_THREADS >> >(
num*num_landmark_, num_landmark_, tmp_diff.gpu_diff(), tmp_dist.mutable_gpu_data());
// Compute the inter-eye distance of the ground-truth landmarks
hipLaunchKernelGGL(( calc_eye_dist_68_kernel<Dtype>) , dim3(CAFFE_GET_BLOCKS(num)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
num, num_landmark_, bottom[1]->gpu_data(), tmp_eye_dist.mutable_gpu_data());
const Dtype *tmp_dist_data = tmp_dist.gpu_data();
const Dtype *tmp_eye_dist_data = tmp_eye_dist.cpu_data();
Dtype error = Dtype(0.0);
for (int i = 0; i < num; ++i) {
Dtype sum_dist = Dtype(0.0);
caffe_gpu_asum(num_landmark_, tmp_dist_data + i*num_landmark_, &sum_dist);
sum_dist /= static_cast<Dtype>(num_landmark_);
error += sum_dist / tmp_eye_dist_data[i];
}
top[0]->mutable_cpu_data()[0] = error / static_cast<Dtype>(num);
}
template <typename Dtype>
void LandmarkPairLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const int num = bottom[0]->num();
const int landmark_dim = 2 * num_landmark_;
const Dtype *tmp_diff_data = tmp_diff.gpu_data();
const Dtype *tmp_dist_data = tmp_dist.gpu_data();
const Dtype *tmp_eye_dist_data = tmp_eye_dist.cpu_data();
const Dtype *top_diff = top[0]->cpu_diff();
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
for (int i = 0; i < bottom[0]->num(); ++i) {
Dtype alpha = top_diff[0] / (num*tmp_eye_dist_data[i] * num_landmark_);
caffe_gpu_div(num_landmark_, tmp_diff_data, tmp_dist_data, bottom_diff);
caffe_gpu_div(num_landmark_, tmp_diff_data + num_landmark_, tmp_dist_data, bottom_diff + num_landmark_);
caffe_gpu_scal(landmark_dim, alpha, bottom_diff);
tmp_diff_data += landmark_dim;
tmp_dist_data += num_landmark_;
bottom_diff += landmark_dim;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LandmarkPairLossLayer);
} // namespace caffe
| 653c0632fe4c8c629d759b2b1288e52f87048939.cu | #include <vector>
#include "caffe/layers/landmark_pair_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
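// Inter-ocular distance for the 68-point facial landmark convention: points 36-41 and
// 42-47 are the two eye contours; each contour is averaged to an eye center and the
// Euclidean distance between the two centers is used to normalize the loss.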
template <typename Dtype>
__global__ void calc_eye_dist_68_kernel(const int N, const int num_landmark, const Dtype *gtLandmarks, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
const Dtype *gtLandmark = gtLandmarks + index * num_landmark * 2;
Dtype left_eye_x = 0, left_eye_y = 0;
Dtype right_eye_x = 0, right_eye_y = 0;
for (int i = 36; i < 42; ++i) {
left_eye_x += gtLandmark[2 * i + 0];
left_eye_y += gtLandmark[2 * i + 1];
}
left_eye_x /= 6;
left_eye_y /= 6;
for (int i = 42; i < 48; ++i) {
right_eye_x += gtLandmark[2 * i + 0];
right_eye_y += gtLandmark[2 * i + 1];
}
right_eye_x /= 6;
right_eye_y /= 6;
Dtype dx = left_eye_x - right_eye_x;
Dtype dy = left_eye_y - right_eye_y;
dist[index] = sqrt(dx*dx + dy*dy);
}
}
template <typename Dtype>
__global__ void calc_landmark_dist_kernel(const int N, const int num_landmark, const Dtype *diff, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
int iBatch = index / num_landmark;
int iLandmark = index % num_landmark;
const Dtype *diff2 = diff + iBatch * num_landmark * 2 + iLandmark * 2;
dist[index] = sqrt(diff2[0] + diff2[1]);
}
}
template <typename Dtype>
__global__ void calc_single_landmark_error_kernel(const int N, const int num_landmark, const Dtype *diff, Dtype *dist) {
CUDA_KERNEL_LOOP(index, N) {
int iBatch = index / num_landmark;
int iLandmark = index % num_landmark;
const Dtype *diff2 = diff + iBatch * num_landmark * 2 + iLandmark * 2;
dist[index] = sqrt(diff2[0] + diff2[1]);
}
}
template <typename Dtype>
void LandmarkPairLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int num = bottom[0]->num();
const int landmark_dim = 2 * num_landmark_;
// Compute the difference and the squared difference between the predicted and ground-truth landmarks
caffe_gpu_sub(bottom[0]->count(), bottom[0]->gpu_data(), bottom[1]->gpu_data(), tmp_diff.mutable_gpu_data());
caffe_gpu_mul(tmp_diff.count(), tmp_diff.gpu_data(), tmp_diff.gpu_data(), tmp_diff.mutable_gpu_diff());
// Compute the per-landmark distance between the predicted and ground-truth landmarks
calc_landmark_dist_kernel<Dtype> << <CAFFE_GET_BLOCKS(num*num_landmark_), CAFFE_CUDA_NUM_THREADS >> >(
num*num_landmark_, num_landmark_, tmp_diff.gpu_diff(), tmp_dist.mutable_gpu_data());
// Compute the inter-eye distance of the ground-truth landmarks
calc_eye_dist_68_kernel<Dtype> <<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >>>(
num, num_landmark_, bottom[1]->gpu_data(), tmp_eye_dist.mutable_gpu_data());
const Dtype *tmp_dist_data = tmp_dist.gpu_data();
const Dtype *tmp_eye_dist_data = tmp_eye_dist.cpu_data();
Dtype error = Dtype(0.0);
for (int i = 0; i < num; ++i) {
Dtype sum_dist = Dtype(0.0);
caffe_gpu_asum(num_landmark_, tmp_dist_data + i*num_landmark_, &sum_dist);
sum_dist /= static_cast<Dtype>(num_landmark_);
error += sum_dist / tmp_eye_dist_data[i];
}
top[0]->mutable_cpu_data()[0] = error / static_cast<Dtype>(num);
}
template <typename Dtype>
void LandmarkPairLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const int num = bottom[0]->num();
const int landmark_dim = 2 * num_landmark_;
const Dtype *tmp_diff_data = tmp_diff.gpu_data();
const Dtype *tmp_dist_data = tmp_dist.gpu_data();
const Dtype *tmp_eye_dist_data = tmp_eye_dist.cpu_data();
const Dtype *top_diff = top[0]->cpu_diff();
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
for (int i = 0; i < bottom[0]->num(); ++i) {
Dtype alpha = top_diff[0] / (num*tmp_eye_dist_data[i] * num_landmark_);
caffe_gpu_div(num_landmark_, tmp_diff_data, tmp_dist_data, bottom_diff);
caffe_gpu_div(num_landmark_, tmp_diff_data + num_landmark_, tmp_dist_data, bottom_diff + num_landmark_);
caffe_gpu_scal(landmark_dim, alpha, bottom_diff);
tmp_diff_data += landmark_dim;
tmp_dist_data += num_landmark_;
bottom_diff += landmark_dim;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LandmarkPairLossLayer);
} // namespace caffe
|
c85e240830795f90d96a1d18bdfea3baf1bb5d38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaFlow.h"
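// TV-L1 data-term update: per pixel, soft-threshold the residual rho = Ix*du + Iy*dv + It
// against lambda*theta*|grad I|^2 (problem 1a), then add theta times the divergence of the
// dual fields (pu1, pu2) / (pv1, pv2) (problem 1b) to obtain the new (duhat1, dvhat1).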
__global__
void SolveDataL1Kernel(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float dix, diy, dit, duhat, dvhat, du, dv;
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride; // current pixel index
dix = Ix[pos];
diy = Iy[pos];
dit = It[pos];
float duhat = duhat0[pos];
float dvhat = dvhat0[pos];
//problem 1a
float rho = (dix*duhat + diy*dvhat + dit);
float upper = lambda*theta*(dix*dix + diy*diy);
float lower = -lambda*theta*(dix*dix + diy*diy);
if ((rho <= upper) && (rho >= lower)) {
float magi = dix*dix + diy*diy;
if (magi != 0) {
du = duhat - rho*dix / magi;
dv = dvhat - rho*diy / magi;
}
else {
du = duhat;
dv = dvhat;
}
}
else if (rho < lower) {
du = duhat + lambda*theta*dix;
dv = dvhat + lambda*theta*diy;
}
else if (rho > upper) {
du = duhat - lambda*theta*dix;
dv = dvhat - lambda*theta*diy;
}
//problem 1b
float divpu, divpv;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpu = pu1[pos] + pu2[pos];
divpv = pv1[pos] + pv2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpu = pu1[pos] + pu2[pos] - pu2[down];
divpv = pv1[pos] + pv2[pos] - pv2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpu = pu1[pos] - pu1[left] + pu2[pos];
divpv = pv1[pos] - pv1[left] + pv2[pos];
}
else {
divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down];
divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down];
}
}
duhat1[pos] = du + theta*divpu;
dvhat1[pos] = dv + theta*divpv;
}
}
void sor::CudaFlow::SolveDataL1(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( SolveDataL1Kernel) , dim3(blocks), dim3(threads) , 0, 0, duhat0, dvhat0,
pu1, pu2,
pv1, pv2,
Ix, Iy, Iz,
w, h, s,
lambda, theta,
duhat1, dvhat1);
}
| c85e240830795f90d96a1d18bdfea3baf1bb5d38.cu | #include "CudaFlow.h"
__global__
void SolveDataL1Kernel(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float dix, diy, dit, duhat, dvhat, du, dv;
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride; // current pixel index
dix = Ix[pos];
diy = Iy[pos];
dit = It[pos];
float duhat = duhat0[pos];
float dvhat = dvhat0[pos];
//problem 1a
float rho = (dix*duhat + diy*dvhat + dit);
float upper = lambda*theta*(dix*dix + diy*diy);
float lower = -lambda*theta*(dix*dix + diy*diy);
if ((rho <= upper) && (rho >= lower)) {
float magi = dix*dix + diy*diy;
if (magi != 0) {
du = duhat - rho*dix / magi;
dv = dvhat - rho*diy / magi;
}
else {
du = duhat;
dv = dvhat;
}
}
else if (rho < lower) {
du = duhat + lambda*theta*dix;
dv = dvhat + lambda*theta*diy;
}
else if (rho > upper) {
du = duhat - lambda*theta*dix;
dv = dvhat - lambda*theta*diy;
}
//problem 1b
float divpu, divpv;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpu = pu1[pos] + pu2[pos];
divpv = pv1[pos] + pv2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpu = pu1[pos] + pu2[pos] - pu2[down];
divpv = pv1[pos] + pv2[pos] - pv2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpu = pu1[pos] - pu1[left] + pu2[pos];
divpv = pv1[pos] - pv1[left] + pv2[pos];
}
else {
divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down];
divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down];
}
}
duhat1[pos] = du + theta*divpu;
dvhat1[pos] = dv + theta*divpv;
}
}
void sor::CudaFlow::SolveDataL1(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveDataL1Kernel <<< blocks, threads >>> (duhat0, dvhat0,
pu1, pu2,
pv1, pv2,
Ix, Iy, Iz,
w, h, s,
lambda, theta,
duhat1, dvhat1);
}
|
cuspAdapter.hip | // !!! This is a file automatically generated by hipify!!!
#include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include "cuspAdapter.hu"
using namespace thundercat;
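// Adapter around CUSP SpMV: preprocess() uploads the CSR arrays once and wraps the raw
// device pointers in CUSP matrix/array views, setX()/getY() transfer the dense vectors,
// and spmv() computes y = A * x on the device via cusp::multiply.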
void CuspAdapter::preprocess( int m, int n, int nnz, int * rowPtr, int * colIndx, double * values) {
M = m;
N = n;
NNZ = nnz;
int *devRowPtr;
int *devColIndx;
double *devValues;
hipMalloc(&devRowPtr, (N+1) * sizeof(int));
hipMalloc(&devColIndx, NNZ * sizeof(int));
hipMalloc(&devValues, NNZ * sizeof(double));
hipMalloc(&devX, M * sizeof(double));
hipMalloc(&devY, N * sizeof(double));
hipMemcpy(devRowPtr, rowPtr, (N+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devColIndx, colIndx, NNZ * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devValues, values, NNZ * sizeof(double), hipMemcpyHostToDevice);
// *NOTE* raw pointers must be wrapped with thrust::device_ptr!
thrust::device_ptr<int> wrapped_device_Ap(devRowPtr);
thrust::device_ptr<int> wrapped_device_Aj(devColIndx);
thrust::device_ptr<double> wrapped_device_Ax(devValues);
thrust::device_ptr<double> wrapped_device_x(devX);
thrust::device_ptr<double> wrapped_device_y(devY);
DeviceIndexArrayView row_offsets(wrapped_device_Ap, wrapped_device_Ap + N + 1);
DeviceIndexArrayView column_indices(wrapped_device_Aj, wrapped_device_Aj + NNZ);
DeviceValueArrayView values_array (wrapped_device_Ax, wrapped_device_Ax + NNZ);
DeviceValueArrayView x_local(wrapped_device_x, wrapped_device_x + M);
DeviceValueArrayView y_local(wrapped_device_y, wrapped_device_y + N);
DeviceView A_local(M, N, NNZ, row_offsets, column_indices, values_array);
A = A_local;
x = x_local;
y = y_local;
}
void CuspAdapter::setX(double * v) {
hipMemcpy(devX, v, M * sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void CuspAdapter::getY(double * w) {
hipMemcpy(w, devY, N * sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
void CuspAdapter::spmv() {
cusp::multiply(A, x, y);
hipDeviceSynchronize();
}
CuspAdapter* thundercat::newCuspAdapter() {
return new CuspAdapter();
}
void thundercat::deleteCuspAdapter(CuspAdapter* handle) {
delete handle;
}
| cuspAdapter.cu | #include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include "cuspAdapter.hu"
using namespace thundercat;
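// Adapter around CUSP SpMV: preprocess() uploads the CSR arrays once and wraps the raw
// device pointers in CUSP matrix/array views, setX()/getY() transfer the dense vectors,
// and spmv() computes y = A * x on the device via cusp::multiply.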
void CuspAdapter::preprocess( int m, int n, int nnz, int * rowPtr, int * colIndx, double * values) {
M = m;
N = n;
NNZ = nnz;
int *devRowPtr;
int *devColIndx;
double *devValues;
cudaMalloc(&devRowPtr, (N+1) * sizeof(int));
cudaMalloc(&devColIndx, NNZ * sizeof(int));
cudaMalloc(&devValues, NNZ * sizeof(double));
cudaMalloc(&devX, M * sizeof(double));
cudaMalloc(&devY, N * sizeof(double));
cudaMemcpy(devRowPtr, rowPtr, (N+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devColIndx, colIndx, NNZ * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devValues, values, NNZ * sizeof(double), cudaMemcpyHostToDevice);
// *NOTE* raw pointers must be wrapped with thrust::device_ptr!
thrust::device_ptr<int> wrapped_device_Ap(devRowPtr);
thrust::device_ptr<int> wrapped_device_Aj(devColIndx);
thrust::device_ptr<double> wrapped_device_Ax(devValues);
thrust::device_ptr<double> wrapped_device_x(devX);
thrust::device_ptr<double> wrapped_device_y(devY);
DeviceIndexArrayView row_offsets(wrapped_device_Ap, wrapped_device_Ap + N + 1);
DeviceIndexArrayView column_indices(wrapped_device_Aj, wrapped_device_Aj + NNZ);
DeviceValueArrayView values_array (wrapped_device_Ax, wrapped_device_Ax + NNZ);
DeviceValueArrayView x_local(wrapped_device_x, wrapped_device_x + M);
DeviceValueArrayView y_local(wrapped_device_y, wrapped_device_y + N);
DeviceView A_local(M, N, NNZ, row_offsets, column_indices, values_array);
A = A_local;
x = x_local;
y = y_local;
}
void CuspAdapter::setX(double * v) {
cudaMemcpy(devX, v, M * sizeof(double), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
}
void CuspAdapter::getY(double * w) {
cudaMemcpy(w, devY, N * sizeof(double), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
}
void CuspAdapter::spmv() {
cusp::multiply(A, x, y);
cudaThreadSynchronize();
}
CuspAdapter* thundercat::newCuspAdapter() {
return new CuspAdapter();
}
void thundercat::deleteCuspAdapter(CuspAdapter* handle) {
delete handle;
}
|
e2339367b6de25c20d019ff84e2f45b47aa76b86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace fastertransformer {
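// Single-step decoder self-attention with a key/value cache: each block handles one
// (batch, head) pair, appends the current step's K and V (plus biases) to the cache,
// masks out the left-padded prefix positions (indices below start_len -
// memory_sequence_length), applies a softmax over the cached steps, and accumulates the
// context vector for this query.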
template <typename T>
__global__ void self_attention_kernel(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int start_len,
const T scalar) {
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T*>(s_buf);
T* logits = reinterpret_cast<T*>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if (tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
// offset for each step
int offset = batch_size * head_num * size_per_head;
for (int ite = 0; ite < step; ++ite) {
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
// for the last step, we should update K + bias_K to the cache
if (ite == step - 1 && tid < size_per_head) {
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * (T)(scalar) : (T)(0.0f);
T qk = blockReduceSum(val);
if (threadIdx.x == 0) {
logits[ite] = qk;
}
__syncthreads(); // try to remove
}
__syncthreads(); // try to remove
__shared__ float s_max_val, s_sum;
float local_i =
(tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
? (float)logits[tid]
: -1e20f;
float max_val = blockReduceMax<float>(local_i);
if (tid == 0) s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o =
(tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
? __expf(local_i)
: 0.0f;
float val = blockReduceSum<float>(local_o);
if (tid == 0) s_sum = val; // + 1e-6;
__syncthreads();
if (tid >= (start_len - memory_sequence_length[bid]) && (tid < step)) {
logits[tid] = local_o / s_sum;
} else if (tid < step) {
logits[tid] = static_cast<T>(0.0f);
}
__syncthreads();
if (tid < size_per_head) {
T sum = (T)0.0f;
for (int ite = 0; ite < step; ++ite) {
T value = value_cache[ite * offset + qkv_id];
// for the last step, we should update V + bias_V to the cache
if (ite == step - 1) {
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void self_attention_dispatch(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int start_len,
hipStream_t stream) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT) ? 1 : 0);
switch (cond) {
/*case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
case 64:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
batch_size, head_num, step, scalar);
break;
case 128:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;*/
default:
// default path
int block_size = 128;
// suppose size_per_head <= 128
if (step <= 64)
block_size = 64;
else if (step <= 128 && step > size_per_head)
block_size = 128;
else if (step > 128 && step <= 256)
block_size = 256;
else if (step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if ((int)block_size < size_per_head) {
block_size = size_per_head;
}
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
hipLaunchKernelGGL(( self_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
memory_sequence_length,
key_buf,
value_buf,
query_buf,
self_Q_bias,
key_cache,
self_K_bias,
value_cache,
self_V_bias,
context_buf,
batch_size,
head_num,
size_per_head,
step,
start_len,
scalar);
#ifndef NDEBUG
hipDeviceSynchronize();
check_cuda_error(hipGetLastError());
#endif
}
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::self_multi_head_attention(
const DataType_* from_tensor,
const int* memory_sequence_length,
DataType_* key_cache_,
DataType_* value_cache_,
DataType_* decoder_output,
const int step,
const int start_len) {
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
if (is_fuse_QKV == true) {
check_cuda_error(
hipblasGemmBatchedEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
(const void* const*)qkv_kernel_,
AType_,
n,
(const void* const*)qkv_input_,
BType_,
k,
&beta,
(void* const*)qkv_buf_,
CType_,
n,
3,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[4])));
} else {
key_buf_ = key_cache_ + (step - 1) * m * n;
value_buf_ = value_cache_ + (step - 1) * m * n;
check_cuda_error(
hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.query_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
query_buf_,
CType_,
n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(
hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.key_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
key_buf_,
CType_,
n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(
hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.value_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
value_buf_,
CType_,
n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
}
self_attention_dispatch<DataType_>(memory_sequence_length,
key_buf_,
value_buf_,
query_buf_,
param_.self_attention.query_weight.bias,
key_cache_,
param_.self_attention.key_weight.bias,
value_cache_,
param_.self_attention.value_weight.bias,
context_buf_,
batch_size_,
head_num_,
size_per_head_,
step,
start_len,
param_.stream);
check_cuda_error(
hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.attention_output_weight.kernel,
AType_,
n,
context_buf_,
BType_,
k,
&beta,
decoder_output,
CType_,
n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm1(const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
DataType_* output,
int m,
int n) {
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if (n % 32 != 0) block.x = 1024;
block.x =
block.x /
(4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
// gamma, beta, output, m, n);
hipLaunchKernelGGL(( decoder_norm1_kernel_generalize<DataType_>), dim3(grid), dim3(block), 0, param_.stream,
input, gamma, beta, output, m, n); // For gpt-3
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm2(const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
const DataType_* bias,
DataType_* output,
DataType_* norm_output,
int m,
int n) {
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if (n % 32 != 0) block.x = 1024;
block.x =
block.x /
(4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
// gamma, beta, bias, output, norm_output, m, n);
hipLaunchKernelGGL(( decoder_norm2_kernel_generalize<DataType_>), dim3(grid), dim3(block), 0, param_.stream,
input, gamma, beta, bias, output, norm_output, m, n); // For gpt-3
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::ffn(const DataType_* input,
DataType_* ffn_inner,
DataType_* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type) {
int m1 = m, k1 = n, n1 = inner_size;
DataType_ alpha = (DataType_)1.0f;
DataType_ beta = (DataType_)0.0f;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n1,
m1,
k1,
&alpha,
param_.ffn.intermediate_weight.kernel,
AType_,
n1,
input,
BType_,
k1,
&beta,
ffn_inner,
CType_,
n1,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[2])));
// dim3 grid(min(m1, 65536));
// dim3 block(min(n1 / 4, 1024));
// // TODO remove this limitation
// // assert(block.x <= 1024);
// if(activation_type == ActivationType::RELU)
// hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner,
// param_.ffn.intermediate_weight.bias, m1, n1);
// else if(activation_type == ActivationType::GELU)
// hipLaunchKernelGGL(( add_bias_gelu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner,
// param_.ffn.intermediate_weight.bias, m1, n1);
dim3 block(min((int)(n1 / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid(min(m1 * n1 / block.x, 65536));
if (activation_type == ActivationType::RELU)
hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid), dim3(block), 0, param_.stream,
ffn_inner,
param_.ffn.intermediate_weight.bias,
m1,
n1 / (4 / sizeof(DataType_)));
else if (activation_type == ActivationType::GELU)
hipLaunchKernelGGL(( add_bias_gelu<DataType_>), dim3(grid), dim3(block), 0, param_.stream,
ffn_inner,
param_.ffn.intermediate_weight.bias,
m1,
n1 / (4 / sizeof(DataType_)));
int m2 = m, n2 = n, k2 = inner_size;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n2,
m2,
k2,
&alpha,
param_.ffn.output_weight.kernel,
AType_,
n2,
ffn_inner,
BType_,
k2,
&beta,
output,
CType_,
n2,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[3])));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_act(
DataType_* input,
const DataType_* bias,
int m,
int n,
hipStream_t stream,
ActivationType activation_type = ActivationType::GELU) {
dim3 block_(min((int)(n / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid_(min(m * n / block_.x, 65536));
if (activation_type == ActivationType::RELU)
hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid_), dim3(block_), 0, stream,
input, bias, m, n / (4 / sizeof(DataType_)));
else if (activation_type == ActivationType::GELU)
hipLaunchKernelGGL(( add_bias_gelu<DataType_>), dim3(grid_), dim3(block_), 0, stream,
input, bias, m, n / (4 / sizeof(DataType_)));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_input(DataType_* output,
const DataType_* input,
const int m,
const int n) {
dim3 grid(min(m, 65536));
dim3 block(min(n, 1024));
hipLaunchKernelGGL(( add_bias_input_kernel_generalize), dim3(grid), dim3(block), 0, param_.stream,
output, input, param_.ffn.output_weight.bias, m, n);
}
template void
OpenTransformerDecoder<OperationType::FP32>::self_multi_head_attention(
const float* from_tensor,
const int* memory_sequence_length,
float* key_cache,
float* value_cache,
float* decoder_output,
const int step,
const int start_len);
template void
OpenTransformerDecoder<OperationType::FP16>::self_multi_head_attention(
const half* from_tensor,
const int* memory_sequence_length,
half* key_cache,
half* value_cache,
half* decoder_output,
const int step,
const int start_len);
template void OpenTransformerDecoder<OperationType::FP32>::ffn(
const float* input,
float* ffn_inner,
float* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP16>::ffn(
const half* input,
half* ffn_inner,
half* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm1(
const float* input,
const float* gamma,
const float* beta,
float* output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm1(
const half* input,
const half* gamma,
const half* beta,
half* output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm2(
const float* input,
const float* gamma,
const float* beta,
const float* bias,
float* output,
float* norm_output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm2(
const half* input,
const half* gamma,
const half* beta,
const half* bias,
half* output,
half* norm_output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP32>::add_bias_act(
float* input,
const float* bias,
int m,
int n,
hipStream_t stream,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP16>::add_bias_act(
half* input,
const half* bias,
int m,
int n,
hipStream_t stream,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP32>::add_bias_input(
float* output, const float* input, const int m, const int n);
template void OpenTransformerDecoder<OperationType::FP16>::add_bias_input(
half* output, const half* input, const int m, const int n);
} // namespace fastertransformer
| e2339367b6de25c20d019ff84e2f45b47aa76b86.cu | /*
* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace fastertransformer {
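// Decoder self-attention for one generation step: one block per (batch, head).
// Dynamic shared memory holds the biased query vector (size_per_head values) followed by
// one attention logit per step; the softmax masks out positions before
// (start_len - memory_sequence_length[bid]), and the last step's K/V (plus bias) is written
// back into the caches as it is consumed.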
template <typename T>
__global__ void self_attention_kernel(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int start_len,
const T scalar) {
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T*>(s_buf);
T* logits = reinterpret_cast<T*>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if (tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
// offset for each step
int offset = batch_size * head_num * size_per_head;
for (int ite = 0; ite < step; ++ite) {
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
// for the last step, we should update K + bias_K to the cache
if (ite == step - 1 && tid < size_per_head) {
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * (T)(scalar) : (T)(0.0f);
T qk = blockReduceSum(val);
if (threadIdx.x == 0) {
logits[ite] = qk;
}
__syncthreads(); // try to remove
}
__syncthreads(); // try to remove
__shared__ float s_max_val, s_sum;
float local_i =
(tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
? (float)logits[tid]
: -1e20f;
float max_val = blockReduceMax<float>(local_i);
if (tid == 0) s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o =
(tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
? __expf(local_i)
: 0.0f;
float val = blockReduceSum<float>(local_o);
if (tid == 0) s_sum = val; // + 1e-6;
__syncthreads();
if (tid >= (start_len - memory_sequence_length[bid]) && (tid < step)) {
logits[tid] = local_o / s_sum;
} else if (tid < step) {
logits[tid] = static_cast<T>(0.0f);
}
__syncthreads();
if (tid < size_per_head) {
T sum = (T)0.0f;
for (int ite = 0; ite < step; ++ite) {
T value = value_cache[ite * offset + qkv_id];
// for the last step, we should update V + bias_V to the cache
if (ite == step - 1) {
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void self_attention_dispatch(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int start_len,
cudaStream_t stream) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT) ? 1 : 0);
switch (cond) {
/*case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
case 64:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
batch_size, head_num, step, scalar);
break;
case 128:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;*/
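// The specialized opt kernels above are commented out, so every size takes the generic
// kernel below. The block size is chosen from `step` (64..1024), bumped up to at least
// size_per_head, and the launch needs sizeof(T) * (size_per_head + step) bytes of dynamic
// shared memory.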
default:
// default path
int block_size = 128;
// suppose size_per_head <= 128
if (step <= 64)
block_size = 64;
else if (step <= 128 && step > size_per_head)
block_size = 128;
else if (step > 128 && step <= 256)
block_size = 256;
else if (step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if ((int)block_size < size_per_head) {
block_size = size_per_head;
}
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
self_attention_kernel<T><<<grid, block, shared_size, stream>>>(
memory_sequence_length,
key_buf,
value_buf,
query_buf,
self_Q_bias,
key_cache,
self_K_bias,
value_cache,
self_V_bias,
context_buf,
batch_size,
head_num,
size_per_head,
step,
start_len,
scalar);
#ifndef NDEBUG
cudaDeviceSynchronize();
check_cuda_error(cudaGetLastError());
#endif
}
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::self_multi_head_attention(
const DataType_* from_tensor,
const int* memory_sequence_length,
DataType_* key_cache_,
DataType_* value_cache_,
DataType_* decoder_output,
const int step,
const int start_len) {
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
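// Fused path: a single batched GEMM (batch count 3) projects Q, K and V at once through the
// pre-packed qkv_kernel_/qkv_input_/qkv_buf_ pointer arrays. Otherwise three separate GEMMs
// write the projections into query_buf_ and directly into this step's slot of the K/V caches.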
if (is_fuse_QKV == true) {
check_cuda_error(
cublasGemmBatchedEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
(const void* const*)qkv_kernel_,
AType_,
n,
(const void* const*)qkv_input_,
BType_,
k,
&beta,
(void* const*)qkv_buf_,
CType_,
n,
3,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[4])));
} else {
key_buf_ = key_cache_ + (step - 1) * m * n;
value_buf_ = value_cache_ + (step - 1) * m * n;
check_cuda_error(
cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.query_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
query_buf_,
CType_,
n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(
cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.key_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
key_buf_,
CType_,
n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(
cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.value_weight.kernel,
AType_,
n,
from_tensor,
BType_,
k,
&beta,
value_buf_,
CType_,
n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}
self_attention_dispatch<DataType_>(memory_sequence_length,
key_buf_,
value_buf_,
query_buf_,
param_.self_attention.query_weight.bias,
key_cache_,
param_.self_attention.key_weight.bias,
value_cache_,
param_.self_attention.value_weight.bias,
context_buf_,
batch_size_,
head_num_,
size_per_head_,
step,
start_len,
param_.stream);
check_cuda_error(
cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
param_.self_attention.attention_output_weight.kernel,
AType_,
n,
context_buf_,
BType_,
k,
&beta,
decoder_output,
CType_,
n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm1(const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
DataType_* output,
int m,
int n) {
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if (n % 32 != 0) block.x = 1024;
block.x =
block.x /
(4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
// gamma, beta, output, m, n);
decoder_norm1_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(
input, gamma, beta, output, m, n); // For gpt-3
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm2(const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
const DataType_* bias,
DataType_* output,
DataType_* norm_output,
int m,
int n) {
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if (n % 32 != 0) block.x = 1024;
block.x =
block.x /
(4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
// gamma, beta, bias, output, norm_output, m, n);
decoder_norm2_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(
input, gamma, beta, bias, output, norm_output, m, n); // For gpt-3
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::ffn(const DataType_* input,
DataType_* ffn_inner,
DataType_* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type) {
int m1 = m, k1 = n, n1 = inner_size;
DataType_ alpha = (DataType_)1.0f;
DataType_ beta = (DataType_)0.0f;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n1,
m1,
k1,
&alpha,
param_.ffn.intermediate_weight.kernel,
AType_,
n1,
input,
BType_,
k1,
&beta,
ffn_inner,
CType_,
n1,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[2])));
// dim3 grid(min(m1, 65536));
// dim3 block(min(n1 / 4, 1024));
// // TODO remove this limitation
// // assert(block.x <= 1024);
// if(activation_type == ActivationType::RELU)
// add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner,
// param_.ffn.intermediate_weight.bias, m1, n1);
// else if(activation_type == ActivationType::GELU)
// add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner,
// param_.ffn.intermediate_weight.bias, m1, n1);
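// The add_bias_{relu,gelu} kernels appear to work on vectorized elements, so the per-row
// length passed in is n1 / (4 / sizeof(DataType_)): unchanged for float, halved for half
// (presumably half2 accesses); the block size is scaled the same way.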
dim3 block(min((int)(n1 / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid(min(m1 * n1 / block.x, 65536));
if (activation_type == ActivationType::RELU)
add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(
ffn_inner,
param_.ffn.intermediate_weight.bias,
m1,
n1 / (4 / sizeof(DataType_)));
else if (activation_type == ActivationType::GELU)
add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(
ffn_inner,
param_.ffn.intermediate_weight.bias,
m1,
n1 / (4 / sizeof(DataType_)));
int m2 = m, n2 = n, k2 = inner_size;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n2,
m2,
k2,
&alpha,
param_.ffn.output_weight.kernel,
AType_,
n2,
ffn_inner,
BType_,
k2,
&beta,
output,
CType_,
n2,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[3])));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_act(
DataType_* input,
const DataType_* bias,
int m,
int n,
cudaStream_t stream,
ActivationType activation_type = ActivationType::GELU) {
dim3 block_(min((int)(n / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid_(min(m * n / block_.x, 65536));
if (activation_type == ActivationType::RELU)
add_bias_relu<DataType_><<<grid_, block_, 0, stream>>>(
input, bias, m, n / (4 / sizeof(DataType_)));
else if (activation_type == ActivationType::GELU)
add_bias_gelu<DataType_><<<grid_, block_, 0, stream>>>(
input, bias, m, n / (4 / sizeof(DataType_)));
}
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_input(DataType_* output,
const DataType_* input,
const int m,
const int n) {
dim3 grid(min(m, 65536));
dim3 block(min(n, 1024));
add_bias_input_kernel_generalize<<<grid, block, 0, param_.stream>>>(
output, input, param_.ffn.output_weight.bias, m, n);
}
template void
OpenTransformerDecoder<OperationType::FP32>::self_multi_head_attention(
const float* from_tensor,
const int* memory_sequence_length,
float* key_cache,
float* value_cache,
float* decoder_output,
const int step,
const int start_len);
template void
OpenTransformerDecoder<OperationType::FP16>::self_multi_head_attention(
const half* from_tensor,
const int* memory_sequence_length,
half* key_cache,
half* value_cache,
half* decoder_output,
const int step,
const int start_len);
template void OpenTransformerDecoder<OperationType::FP32>::ffn(
const float* input,
float* ffn_inner,
float* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP16>::ffn(
const half* input,
half* ffn_inner,
half* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm1(
const float* input,
const float* gamma,
const float* beta,
float* output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm1(
const half* input,
const half* gamma,
const half* beta,
half* output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm2(
const float* input,
const float* gamma,
const float* beta,
const float* bias,
float* output,
float* norm_output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm2(
const half* input,
const half* gamma,
const half* beta,
const half* bias,
half* output,
half* norm_output,
int m,
int n);
template void OpenTransformerDecoder<OperationType::FP32>::add_bias_act(
float* input,
const float* bias,
int m,
int n,
cudaStream_t stream,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP16>::add_bias_act(
half* input,
const half* bias,
int m,
int n,
cudaStream_t stream,
ActivationType activation_type);
template void OpenTransformerDecoder<OperationType::FP32>::add_bias_input(
float* output, const float* input, const int m, const int n);
template void OpenTransformerDecoder<OperationType::FP16>::add_bias_input(
half* output, const half* input, const int m, const int n);
} // namespace fastertransformer
|
839b928d59f233610cd9ce88207a5eb6c46090d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
template <typename T, int TD_T, int BLOCK_ROWS>
__global__ void transposeNoBankConflicts(T *odata, T *idata, int width, int height)
{
__shared__ T tile[TD_T][TD_T+1];
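// The extra column (+1) pads each shared-memory row so that the column-wise reads during
// the transposed write hit different banks, avoiding shared-memory bank conflicts.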
int xIndex = blockIdx.x * TD_T + threadIdx.x;
int yIndex = blockIdx.y * TD_T + threadIdx.y;
int index_in = xIndex + yIndex * width;
xIndex = blockIdx.y * TD_T + threadIdx.x;
yIndex = blockIdx.x * TD_T + threadIdx.y;
int index_out = xIndex + (yIndex) * height;
if ( xIndex < height && yIndex < width)
{
for (int i=0; i<TD_T; i+=BLOCK_ROWS)
{
tile[threadIdx.y + i][threadIdx.x] = idata[ index_in + i * width];
}
__syncthreads();
for ( int i = 0; i < TD_T; i += BLOCK_ROWS)
{
odata[ index_out + i * height] = tile[ threadIdx.x ][ threadIdx.y + i ];
}
}
}
template <typename T>
void transpose_2( size_t height, size_t width, T* idata, T* odata, int tile_dimension,hipStream_t stream )
{
int gridx=width / tile_dimension;
if ( width % tile_dimension != 0)
{
++ gridx;
std::stringstream ss;
ss << "Transpose 2: Width " << width << " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
throw std::runtime_error( ss.str() );
}
int gridy = height/tile_dimension;
if ( height % tile_dimension != 0)
{
++ gridy;
std::stringstream ss;
ss << "Transpose 2: Height " << height<< " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
throw std::runtime_error( ss.str() );
}
dim3 grid( gridx, gridy),
threads( tile_dimension, tile_dimension );
switch (tile_dimension)
{
case 2:
hipLaunchKernelGGL(( transposeNoBankConflicts<T,2,2>), dim3(grid), dim3(threads),0,stream, odata, idata, width, height);
break;
case 4:
hipLaunchKernelGGL(( transposeNoBankConflicts<T,4,4>), dim3(grid), dim3(threads),0,stream, odata, idata, width, height);
break;
case 8:
hipLaunchKernelGGL(( transposeNoBankConflicts<T,8,8>), dim3(grid), dim3(threads),0,stream, odata, idata, width, height);
break;
case 16:
hipLaunchKernelGGL(( transposeNoBankConflicts<T,16,16>), dim3(grid), dim3(threads),0,stream, odata, idata, width, height);
break;
/* case 24:
transposeNoBankConflicts<T,24,24><<<grid, threads>>>(odata, idata, width, height);
break;
case 32:
transposeNoBankConflicts<T,32,32><<<grid, threads>>>(odata, idata, width, height);
break;
case 64:
transposeNoBankConflicts<T,64,64><<<grid, threads>>>(odata, idata, width, height);
case 128:
transposeNoBankConflicts<T,128,128><<<grid, threads>>>(odata, idata, width, height);
*/ default:
std::cerr << "Tile Dimension: " << tile_dimension << " not supported. Aborting\n";
exit( -1 );
}
}
| 839b928d59f233610cd9ce88207a5eb6c46090d0.cu | #pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
template <typename T, int TD_T, int BLOCK_ROWS>
__global__ void transposeNoBankConflicts(T *odata, T *idata, int width, int height)
{
__shared__ T tile[TD_T][TD_T+1];
int xIndex = blockIdx.x * TD_T + threadIdx.x;
int yIndex = blockIdx.y * TD_T + threadIdx.y;
int index_in = xIndex + yIndex * width;
xIndex = blockIdx.y * TD_T + threadIdx.x;
yIndex = blockIdx.x * TD_T + threadIdx.y;
int index_out = xIndex + (yIndex) * height;
if ( xIndex < height && yIndex < width)
{
for (int i=0; i<TD_T; i+=BLOCK_ROWS)
{
tile[threadIdx.y + i][threadIdx.x] = idata[ index_in + i * width];
}
__syncthreads();
for ( int i = 0; i < TD_T; i += BLOCK_ROWS)
{
odata[ index_out + i * height] = tile[ threadIdx.x ][ threadIdx.y + i ];
}
}
}
template <typename T>
void transpose_2( size_t height, size_t width, T* idata, T* odata, int tile_dimension,cudaStream_t stream )
{
int gridx=width / tile_dimension;
if ( width % tile_dimension != 0)
{
++ gridx;
std::stringstream ss;
ss << "Transpose 2: Width " << width << " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
throw std::runtime_error( ss.str() );
}
int gridy = height/tile_dimension;
if ( height % tile_dimension != 0)
{
++ gridy;
std::stringstream ss;
ss << "Transpose 2: Height " << height<< " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
throw std::runtime_error( ss.str() );
}
dim3 grid( gridx, gridy),
threads( tile_dimension, tile_dimension );
switch (tile_dimension)
{
case 2:
transposeNoBankConflicts<T,2,2><<<grid, threads,0,stream>>>(odata, idata, width, height);
break;
case 4:
transposeNoBankConflicts<T,4,4><<<grid, threads,0,stream>>>(odata, idata, width, height);
break;
case 8:
transposeNoBankConflicts<T,8,8><<<grid, threads,0,stream>>>(odata, idata, width, height);
break;
case 16:
transposeNoBankConflicts<T,16,16><<<grid, threads,0,stream>>>(odata, idata, width, height);
break;
/* case 24:
transposeNoBankConflicts<T,24,24><<<grid, threads>>>(odata, idata, width, height);
break;
case 32:
transposeNoBankConflicts<T,32,32><<<grid, threads>>>(odata, idata, width, height);
break;
case 64:
transposeNoBankConflicts<T,64,64><<<grid, threads>>>(odata, idata, width, height);
case 128:
transposeNoBankConflicts<T,128,128><<<grid, threads>>>(odata, idata, width, height);
*/ default:
std::cerr << "Tile Dimension: " << tile_dimension << " not supported. Aborting\n";
exit( -1 );
}
}
|
d2e9fd3aa7bd68f23dba0ff273be3fe52ab21555.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LB.h"
extern "C"
__global__ void PeriodicBC(float *df, int periodic,int LX, int LY)
{
//////////////////////////////////////////////////////////////////////////////////////////////////
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int tx = tIdx + blockIdx.x * blockDim.x;
int ty = tIdy + blockIdx.y * blockDim.y;
//////////////////////////////////////////////////////////////////////////////////////////////////
if(periodic == 0){
int nodeSouth = (tx + (0) * LX) * 9;
int nodeNorth = (tx + (LY - 1) * LX) * 9;
df[nodeSouth + 3] = df[nodeNorth + 3];
df[nodeSouth + 5] = df[nodeNorth + 5];
df[nodeSouth + 7] = df[nodeNorth + 7];
df[nodeNorth + 4] = df[nodeSouth + 4];
df[nodeNorth + 8] = df[nodeSouth + 8];
df[nodeNorth + 6] = df[nodeSouth + 6];
}
if(periodic == 1){
int nodeWest = (0 + ty * LX) * 9;
int nodeEast = ((LX - 1) + ty * LX) * 9;
df[nodeWest + 1] = df[nodeEast + 1];
df[nodeWest + 5] = df[nodeEast + 5];
df[nodeWest + 8] = df[nodeEast + 8];
df[nodeEast + 2] = df[nodeWest + 2];
df[nodeEast + 7] = df[nodeWest + 7];
df[nodeEast + 6] = df[nodeWest + 6];
}
}
| d2e9fd3aa7bd68f23dba0ff273be3fe52ab21555.cu | #include "LB.h"
extern "C"
__global__ void PeriodicBC(float *df, int periodic,int LX, int LY)
{
//////////////////////////////////////////////////////////////////////////////////////////////////
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int tx = tIdx + blockIdx.x * blockDim.x;
int ty = tIdy + blockIdx.y * blockDim.y;
//////////////////////////////////////////////////////////////////////////////////////////////////
if(periodic == 0){
int nodeSouth = (tx + (0) * LX) * 9;
int nodeNorth = (tx + (LY - 1) * LX) * 9;
df[nodeSouth + 3] = df[nodeNorth + 3];
df[nodeSouth + 5] = df[nodeNorth + 5];
df[nodeSouth + 7] = df[nodeNorth + 7];
df[nodeNorth + 4] = df[nodeSouth + 4];
df[nodeNorth + 8] = df[nodeSouth + 8];
df[nodeNorth + 6] = df[nodeSouth + 6];
}
if(periodic == 1){
int nodeWest = (0 + ty * LX) * 9;
int nodeEast = ((LX - 1) + ty * LX) * 9;
df[nodeWest + 1] = df[nodeEast + 1];
df[nodeWest + 5] = df[nodeEast + 5];
df[nodeWest + 8] = df[nodeEast + 8];
df[nodeEast + 2] = df[nodeWest + 2];
df[nodeEast + 7] = df[nodeWest + 7];
df[nodeEast + 6] = df[nodeWest + 6];
}
}
|
1ca30f4d23c7f6b0b0ec63174f20ed99d18c35d7.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 28
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
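// F is the per-thread element stride: thread `tid` reads A[tid*F + k*max_tid*F], so each of
// the ITERATIONS inner steps touches a block of max_tid*F ints.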
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
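// Power stress kernel: every thread performs ITERATIONS strided global loads per outer
// iteration; only the last loaded value is kept, so the loop mainly keeps the memory system
// busy while power is being measured.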
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills the array with pseudo-random int entries (rand() / RAND_MAX is integer division, so values are 0 or 1).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| 1ca30f4d23c7f6b0b0ec63174f20ed99d18c35d7.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 28
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills the array with pseudo-random int entries (rand() / RAND_MAX is integer division, so values are 0 or 1).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
b33e6176a94849234391b60543c1864b337b8259.hip | // !!! This is a file automatically generated by hipify!!!
// GPU anim test: evaluate RDFT-encoded motion data using CUDA
// Author: Sergey Chaban <[email protected]>
#include <hip/hip_runtime.h>
#include <math_functions.h>
#include <math_constants.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#define WIN32_LEAN_AND_MEAN 1
#define NOMINMAX
#define _WIN32_WINNT 0x0601
#include <Windows.h>
int64_t timestamp() {
LARGE_INTEGER ctr;
::QueryPerformanceCounter(&ctr);
return ctr.QuadPart;
}
//---------------------------------------------------------------
enum E_MOT_TRK { POS, ROT, SCL };
enum E_MOT_RORD { XYZ, XZY, YXZ, YZX, ZXY, ZYX };
enum E_MOT_XORD { SRT, STR, RST, RTS, TSR, TRS };
struct MOT_STRING {
uint8_t len;
char chr[0x40 - 1];
operator char* () { return chr; }
};
union MOT_VEC {
struct { float x, y, z; };
float v[3];
};
union MOT_QUAT {
struct { float x, y, z, w; };
float v[4];
};
struct MOT_TRACK {
MOT_VEC vmin;
MOT_VEC vmax;
uint8_t srcMask;
uint8_t dataMask;
uint8_t reserved[6];
};
struct MOT_NODE {
MOT_STRING name;
uint32_t offs[3];
uint8_t xord;
uint8_t rord;
uint8_t reserved[2];
MOT_TRACK trk[3];
};
struct MOT_CLIP {
char sig[4];
uint32_t size;
float rate;
uint32_t nfrm;
uint32_t nnod;
uint32_t hash;
uint32_t eval;
uint32_t seq;
MOT_STRING name;
MOT_NODE nodes[1];
};
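// A MOT_CLIP is one variable-length blob: nodes[] extends past the declared size (nnod
// entries) and the per-track offsets in MOT_NODE are byte offsets from the start of the clip.
// motQuatExp() below is the quaternion exponential map, turning a rotation log-vector
// (axis * half-angle) into a normalized quaternion.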
MOT_QUAT motQuatExp(MOT_VEC v) {
MOT_QUAT q = {};
float ha = sqrtf(v.x*v.x + v.y*v.y + v.z*v.z);
float s = fabsf(ha) < 1.0e-4f ? 1.0f : sinf(ha) / ha;
q.x = v.x * s;
q.y = v.y * s;
q.z = v.z * s;
q.w = cosf(ha);
s = 1.0f / sqrtf(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w);
q.x *= s;
q.y *= s;
q.z *= s;
q.w *= s;
return q;
}
bool motCkNodeIdx(const MOT_CLIP* pClip, int nodeIdx) { return pClip && ((uint32_t)nodeIdx < pClip->nnod); }
bool motCkFrameNo(const MOT_CLIP* pClip, int fno) { return pClip && ((uint32_t)fno < pClip->nfrm); }
float* motGetTrackData(const MOT_CLIP* pClip, int nodeIdx, E_MOT_TRK trk) {
float* p = nullptr;
if (pClip && motCkNodeIdx(pClip, nodeIdx)) {
int itrk = (int)trk;
if (itrk < 3) {
uint32_t offs = pClip->nodes[nodeIdx].offs[itrk];
if (offs) {
char* pTop = (char*)pClip;
p = (float*)&pTop[offs];
}
}
}
return p;
}
void motGetChanData(const MOT_CLIP* pClip, int nodeIdx, E_MOT_TRK trk, int chIdx, float** ppData, int* pStride) {
int stride = 0;
float* p = nullptr;
if ((uint32_t)chIdx < 3) {
float* pTrk = motGetTrackData(pClip, nodeIdx, trk);
if (pTrk) {
int dataMask = pClip->nodes[nodeIdx].trk[(int)trk].dataMask;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) {
p = pTrk + stride;
++stride;
}
}
}
}
if (ppData) {
*ppData = p;
}
if (pStride) {
*pStride = stride;
}
}
MOT_VEC motGetVec(const MOT_CLIP* pClip, int nodeIdx, int fno, E_MOT_TRK trk) {
MOT_VEC v = {};
if (pClip && motCkNodeIdx(pClip, nodeIdx) && motCkFrameNo(pClip, fno)) {
float* p = motGetTrackData(pClip, nodeIdx, trk);
if (p) {
int itrk = (int)trk;
if (itrk < 3) {
float defVal = trk == SCL ? 1.0f : 0.0f;
int dataMask = pClip->nodes[nodeIdx].trk[itrk].dataMask;
int srcMask = pClip->nodes[nodeIdx].trk[itrk].srcMask;
int vsize = 0;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) ++vsize;
}
p += fno * vsize;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) {
v.v[i] = *p++;
} else if (srcMask & (1 << i)) {
v.v[i] = pClip->nodes[nodeIdx].trk[itrk].vmin.v[i];
} else {
v.v[i] = defVal;
}
}
}
}
}
return v;
}
MOT_QUAT motGetQuat(const MOT_CLIP* pClip, int nodeIdx, int fno) {
return motQuatExp(motGetVec(pClip, nodeIdx, fno, ROT));
}
MOT_CLIP* motClipLoad(const char* pPath) {
MOT_CLIP* pClip = nullptr;
FILE* f = fopen(pPath, "rb");
if (f) {
long len = 0;
if (0 == fseek(f, 0, SEEK_END)) {
len = ftell(f);
}
fseek(f, 0, SEEK_SET);
if (len) {
pClip = (MOT_CLIP*)malloc(len);
if (pClip) {
fread(pClip, len, 1, f);
}
}
fclose(f);
}
return pClip;
}
void motClipUnload(MOT_CLIP* pClip) {
if (pClip) { free(pClip); }
}
//---------------------------------------------------------------
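// Direct (O(n * n/2)) real DFT helpers used to encode each rotation channel.
// RDFT_fwd packs its output as [re[0..hn-1], im[0..hn-1]] with hn = n/2 (n rounded up to
// even), scaling by 1/hn and halving the first/last real terms; RDFT_inv evaluates the
// matching series to reconstruct the samples (used by the optional self-check in load()).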
void RDFT_fwd(float* pDst, const float* pSrc, int nsrc, int stride) {
int n = (nsrc & 1) ? nsrc + 1 : nsrc;
int hn = n / 2;
float* pRe = pDst;
float* pIm = pDst + hn;
float nrm = 1.0f / (float)hn;
float s = atanf(1.0f) * 8.0f / (float)n;
for (int i = 0; i < n; ++i) {
pDst[i] = 0.0f;
}
for (int i = 0; i < hn; ++i) {
for (int j = 0; j < n; ++j) {
int idx = (j % nsrc) * stride;
float val = pSrc[idx];
float t = s * (float)i * (float)j;
pRe[i] += val * cosf(t);
pIm[i] -= val * sinf(t);
}
}
for (int i = 0; i < hn; ++i) {
pRe[i] *= nrm;
}
pRe[0] /= 2;
pRe[hn - 1] /= 2;
for (int i = 0; i < hn; ++i) {
pIm[i] *= -nrm;
}
}
void RDFT_inv(float* pDst, const float* pSrc, int ndst) {
int n = (ndst & 1) ? ndst + 1 : ndst;
int hn = n / 2;
const float* pRe = pSrc;
const float* pIm = pSrc + hn;
float s = atanf(1.0f) * 8.0f / (float)n;
for (int i = 0; i < ndst; ++i) {
pDst[i] = 0.0f;
}
for (int i = 0; i < ndst; ++i) {
float t = s * (float)i;
for (int j = 0; j < hn; ++j) {
float re = pRe[j];
float im = pIm[j];
float r = t * (float)j;
pDst[i] += re*cosf(r) + im*sinf(r);
}
}
}
class cMotion {
public:
struct ROT_CHANNEL {
float* pData;
int stride;
int nodeId;
int chId;
float* pCoefs;
int cut;
};
protected:
MOT_CLIP* mpClip;
int mPosVecsNum;
MOT_VEC* mpPosVecs;
int mRotChansNum;
ROT_CHANNEL* mpRotChans;
int mCoefsNum;
float mParamFactor;
int mCut;
float* mpEvalCoefsCPU;
float* mpEvalCoefsDev;
float* mpEvalResCPU;
float* mpEvalResDev;
hipEvent_t mEvt;
void eval_pos_vecs(float frame);
public:
cMotion()
:
mpClip(nullptr),
mPosVecsNum(0), mpPosVecs(nullptr),
mRotChansNum(0), mpRotChans(nullptr),
mCoefsNum(0), mParamFactor(0.0f), mCut(0),
mpEvalCoefsCPU(nullptr), mpEvalCoefsDev(nullptr),
mpEvalResCPU(nullptr), mpEvalResDev(nullptr)
{}
~cMotion() {
unload();
}
void load(const char* pPath);
void unload();
float frame_to_param(float frame) {
if (!mpClip) return 0.0f;
float n = (float)mpClip->nfrm;
float f = ::fmodf(::fabsf(frame), n);
return f * mParamFactor;
}
int get_nfrm() const { return mpClip ? mpClip->nfrm : 0; }
int get_nrot() const { return mRotChansNum; }
int get_npos() const { return mPosVecsNum; }
int get_ncut() const { return mCut; }
float* get_res_ptr() { return mpEvalResCPU; }
void clear_res() {
if (mpEvalResCPU) {
::memset(mpEvalResCPU, 0, mRotChansNum * sizeof(float));
}
}
void eval_cpu(float frame);
void eval_dev(float frame);
};
void cMotion::load(const char* pPath) {
mpClip = motClipLoad(pPath);
if (!mpClip) return;
int numNodes = mpClip->nnod;
mPosVecsNum = 0;
for (int i = 0; i < numNodes; ++i) {
int srcMask = mpClip->nodes[i].trk[POS].srcMask;
int dataMask = mpClip->nodes[i].trk[POS].dataMask;
if (srcMask || dataMask) {
++mPosVecsNum;
}
}
if (mPosVecsNum > 0) {
mpPosVecs = (MOT_VEC*)::malloc(mPosVecsNum * sizeof(MOT_VEC));
}
mRotChansNum = 0;
for (int i = 0; i < numNodes; ++i) {
int dataMask = mpClip->nodes[i].trk[ROT].dataMask;
for (int j = 0; j < 3; ++j) {
if (dataMask & (1 << j)) {
++mRotChansNum;
}
}
}
if (mRotChansNum > 0) {
mpRotChans = (ROT_CHANNEL*)::malloc(mRotChansNum * sizeof(ROT_CHANNEL));
}
if (!mpRotChans) return;
int chIdx = 0;
for (int i = 0; i < numNodes; ++i) {
int dataMask = mpClip->nodes[i].trk[ROT].dataMask;
for (int j = 0; j < 3; ++j) {
if (dataMask & (1 << j)) {
ROT_CHANNEL* pCh = &mpRotChans[chIdx++];
pCh->nodeId = i;
pCh->chId = j;
motGetChanData(mpClip, i, ROT, j, &pCh->pData, &pCh->stride);
}
}
}
int nfrm = mpClip->nfrm;
float* pTmp = 0 ? (float*)::malloc(nfrm * sizeof(float)) : nullptr;
int ncoef = nfrm;
if (ncoef & 1) ++ncoef;
mCoefsNum = ncoef;
mParamFactor = ::atanf(1.0f) * 8.0f / (float)ncoef;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
pCh->pCoefs = (float*)::malloc(ncoef * sizeof(float));
if (pCh->pCoefs) {
RDFT_fwd(pCh->pCoefs, pCh->pData, nfrm, pCh->stride);
if (pTmp) {
char* pNodeName = mpClip->nodes[pCh->nodeId].name;
RDFT_inv(pTmp, pCh->pCoefs, nfrm);
::printf("-- [%d] %s:%c\n", i, pNodeName, "xyz"[pCh->chId]);
for (int k = 0; k < nfrm; ++k) {
float ref = pCh->pData[k*pCh->stride];
float val = pTmp[k];
::printf("[%d]: %.4f - %.4f = %f\n", k, ref, val, ref - val);
}
}
}
}
if (pTmp) {
::free(pTmp);
pTmp = nullptr;
}
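// Per-channel cutoff: find the first frequency whose real coefficient quantizes to zero
// (step 0.0005) and treat everything above it as negligible. The shared cut used for
// evaluation is 75% of the largest per-channel cut, and the packed coefficient buffer holds,
// per channel, [re[0], re[1..cut-1], im[1..cut-1]].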
int minCut = nfrm + 1;
int maxCut = 0;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
pCh->cut = (nfrm & (~1)) / 2;
float* pRe = pCh->pCoefs;
if (pRe) {
const float qs = 0.0005f;
for (int j = 0; j < ncoef / 2; ++j) {
float x = pRe[j];
float qx = ::floorf(::fabsf(x) / qs) * qs * (x < 0.0f ? -1.0f : 1.0f);
if (qx == 0) {
pCh->cut = j;
break;
}
}
}
if (pCh->cut < minCut) {
minCut = pCh->cut;
}
if (pCh->cut > maxCut) {
maxCut = pCh->cut;
}
}
mCut = (int)((float)maxCut * 0.75f);
::printf("coefs cut: %d .. %d -> %d\n", minCut, maxCut, mCut);
size_t evalCoefsSize = mRotChansNum * (mCut + mCut - 1) * sizeof(float);
hipHostMalloc(&mpEvalCoefsCPU, evalCoefsSize);
if (!mpEvalCoefsCPU) return;
float* pCoefs = mpEvalCoefsCPU;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
if (pCh->pCoefs) {
*pCoefs++ = pCh->pCoefs[0];
for (int j = 1; j < mCut; ++j) {
*pCoefs++ = pCh->pCoefs[j];
}
for (int j = 1; j < mCut; ++j) {
*pCoefs++ = pCh->pCoefs[j + (mCoefsNum / 2)];
}
}
}
hipMalloc(&mpEvalCoefsDev, evalCoefsSize);
if (mpEvalCoefsDev) {
hipMemcpy(mpEvalCoefsDev, mpEvalCoefsCPU, evalCoefsSize, hipMemcpyHostToDevice);
}
size_t evalResSize = mRotChansNum * sizeof(float);
hipHostMalloc(&mpEvalResCPU, evalResSize);
clear_res();
hipMalloc(&mpEvalResDev, evalResSize);
hipEventCreateWithFlags(&mEvt, hipEventDisableTiming);
}
void cMotion::unload() {
hipDeviceSynchronize();
hipEventDestroy(mEvt);
if (mpEvalResDev) {
hipFree(mpEvalResDev);
mpEvalResDev = nullptr;
}
if (mpEvalResCPU) {
hipHostFree(mpEvalResCPU);
mpEvalResCPU = nullptr;
}
if (mpEvalCoefsDev) {
hipFree(mpEvalCoefsDev);
mpEvalCoefsDev = nullptr;
}
if (mpEvalCoefsCPU) {
hipHostFree(mpEvalCoefsCPU);
mpEvalCoefsCPU = nullptr;
}
if (mpRotChans) {
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
if (pCh->pCoefs) {
::free(pCh->pCoefs);
pCh->pCoefs = nullptr;
}
}
::free(mpRotChans);
mpRotChans = nullptr;
mRotChansNum = 0;
}
if (mpPosVecs) {
::free(mpPosVecs);
mpPosVecs = nullptr;
mPosVecsNum = 0;
}
if (mpClip) {
motClipUnload(mpClip);
mpClip = nullptr;
}
mCut = 0;
mCoefsNum = 0;
mParamFactor = 0.0f;
}
void cMotion::eval_pos_vecs(float frame) {
if (!mpClip) return;
if (!mpPosVecs) return;
int nfrm = mpClip->nfrm;
int numNodes = mpClip->nnod;
int idx = 0;
for (int i = 0; i < numNodes; ++i) {
int srcMask = mpClip->nodes[i].trk[POS].srcMask;
int dataMask = mpClip->nodes[i].trk[POS].dataMask;
if (srcMask || dataMask) {
int fno = (int)frame;
MOT_VEC v = motGetVec(mpClip, i, fno, POS);
if (fno < nfrm - 1) {
float t = frame - (float)fno;
MOT_VEC v1 = motGetVec(mpClip, i, fno + 1, POS);
for (int j = 0; j < 3; ++j) {
v.v[j] += (v1.v[j] - v.v[j]) * t;
}
}
mpPosVecs[idx] = v;
++idx;
}
}
}
__host__ __device__
#if 0
void eval_sub(float* pRes, const float* pCoefs, float t, int n, int tid) {
const float* pRe = &pCoefs[tid * (n + n - 1)];
const float* pIm = &pRe[n];
float res = pRe[0];
for (int i = 1; i < n; ++i) {
float r = t * (float)i;
float re = pRe[i];
float im = pIm[i - 1];
res += re*cosf(r) + im*sinf(r);
}
pRes[tid] = res;
}
#else
// NR ed3: (5.4.6)
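// Trigonometric recurrence (Numerical Recipes, 3rd ed., eq. 5.4.6): with
// a = 2*sin^2(t/2) and b = sin(t),
//   cos((k+1)*t) = cos(k*t) - (a*cos(k*t) + b*sin(k*t))
//   sin((k+1)*t) = sin(k*t) - (a*sin(k*t) - b*cos(k*t))
// so sinf/cosf are never called inside the coefficient loop.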
void eval_sub(float* pRes, const float* pCoefs, float t, int n, int tid) {
const float* pRe = &pCoefs[tid * (n + n - 1)];
const float* pIm = &pRe[n];
float res = pRe[0];
float r = t;
float c = cosf(r);
float s = sinf(r);
float a = sinf(r*0.5f);
a = 2.0f * a*a;
float b = s;
float re = pRe[1];
float im = pIm[0];
res += re*c + im*s;
for (int i = 2; i < n; ++i) {
float ci = c - (a*c + b*s);
float si = s - (a*s - b*c);
re = pRe[i];
im = pIm[i - 1];
res += re*ci + im*si;
c = ci;
s = si;
}
pRes[tid] = res;
}
#endif
void cMotion::eval_cpu(float frame) {
float* pCoefs = mpEvalCoefsCPU;
float* pRes = mpEvalResCPU;
if (!pCoefs || !pRes) return;
float t = frame_to_param(frame);
int n = mRotChansNum;
//#pragma omp parallel for
for (int i = 0; i < n; ++i) {
eval_sub(pRes, pCoefs, t, mCut, i);
}
eval_pos_vecs(frame);
}
__global__ void eval_kernel(float* pRes, const float* pCoefs, float t, int n, int nres) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= nres) return;
eval_sub(pRes, pCoefs, t, n, tid);
}
static int s_blkMin = 64;
static int s_blkMax = 128;
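// Pick a block size of roughly half the work count, rounded down to a power of two and
// clamped to [s_blkMin, s_blkMax]; s_blkMax is derived from the device's maxThreadsPerBlock
// in init().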
static int calc_thr_num(int nwk) {
int n = (int)::log2(nwk) - 1;
if (n < 0) n = 0;
n = 1 << n;
if (n < s_blkMin) n = s_blkMin;
if (n > s_blkMax) n = s_blkMax;
return n;
}
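// GPU path: launch the coefficient-evaluation kernel and the async copy of its results,
// overlap the CPU-side position interpolation with them, then spin on the event until the
// copy has landed in the pinned host buffer.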
void cMotion::eval_dev(float frame) {
float* pCoefs = mpEvalCoefsDev;
float* pRes = mpEvalResDev;
if (!pCoefs || !pRes) return;
float t = frame_to_param(frame);
int nch = mRotChansNum;
int nthr = calc_thr_num(nch);
int nblk = (nch + nthr - 1) / nthr;
hipLaunchKernelGGL(( eval_kernel), dim3(nblk), dim3(nthr), 0, 0, pRes, pCoefs, t, mCut, nch);
hipMemcpyAsync(mpEvalResCPU, pRes, mRotChansNum * sizeof(float), hipMemcpyDeviceToHost, 0);
hipEventRecord(mEvt, 0);
eval_pos_vecs(frame);
while (hipEventQuery(mEvt) == hipErrorNotReady) {}
}
static cMotion s_mot;
void init(const char* pClipPath) {
hipDeviceProp_t devProps;
hipGetDeviceProperties(&devProps, 0);
s_blkMax = devProps.maxThreadsPerBlock / 8;
::printf("device: %s, compute %d.%d\n", devProps.name, devProps.major, devProps.minor);
::printf("SM count = %d\n", devProps.multiProcessorCount);
::printf("max thr/SM = %d\n", devProps.maxThreadsPerMultiProcessor);
::printf("max thr/blk = %d\n", devProps.maxThreadsPerBlock);
::printf("concurrent exec = %s\n", devProps.concurrentKernels ? "yes" : "no");
::printf("\n");
s_mot.load(pClipPath);
::printf("#rot chans = %d\n", s_mot.get_nrot());
::printf("#pos vecs = %d\n", s_mot.get_npos());
}
double res_l2() {
double res = 0;
int n = s_mot.get_nrot();
float* p = s_mot.get_res_ptr();
if (p) {
for (int i = 0; i < n; ++i) {
res += p[i] * p[i];
}
res = sqrt(res);
}
return res;
}
int main(int argc, char* argv[]) {
if (argc < 2) {
return -1;
}
init(argv[1]);
const int N = 1000;
double cpuT = 0.0f;
double devT = 0.0f;
double devRes = 0.0;
double cpuRes = 0.0;
::printf("-----\n");
for (int i = 0; i < N; ++i) {
float frm = s_mot.get_nfrm() * float(i) / float(N);
int64_t devT0 = timestamp();
s_mot.eval_dev(frm);
int64_t devT1 = timestamp();
double devDT = (double)(devT1 - devT0);
devT += devDT;
devRes += res_l2();
}
devT /= N;
::printf("dev res = %.1f\n", devRes);
::printf("dev t = %.1f\n", devT);
s_mot.clear_res();
for (int i = 0; i < N; ++i) {
float frm = s_mot.get_nfrm() * float(i) / float(N);
int64_t cpuT0 = timestamp();
s_mot.eval_cpu(frm);
int64_t cpuT1 = timestamp();
double cpuDT = (double)(cpuT1 - cpuT0);
cpuT += cpuDT;
cpuRes += res_l2();
}
cpuT /= N;
::printf("cpu res = %.1f\n", cpuRes);
::printf("cpu t = %.1f\n", cpuT);
::printf("%f\n", cpuT / devT);
return 0;
}
| b33e6176a94849234391b60543c1864b337b8259.cu | // GPU anim test: evaluate RDFT-encoded motion data using CUDA
// Author: Sergey Chaban <[email protected]>
#include <cuda_runtime.h>
#include <math_functions.h>
#include <math_constants.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#define WIN32_LEAN_AND_MEAN 1
#define NOMINMAX
#define _WIN32_WINNT 0x0601
#include <Windows.h>
int64_t timestamp() {
LARGE_INTEGER ctr;
::QueryPerformanceCounter(&ctr);
return ctr.QuadPart;
}
//---------------------------------------------------------------
enum E_MOT_TRK { POS, ROT, SCL };
enum E_MOT_RORD { XYZ, XZY, YXZ, YZX, ZXY, ZYX };
enum E_MOT_XORD { SRT, STR, RST, RTS, TSR, TRS };
struct MOT_STRING {
uint8_t len;
char chr[0x40 - 1];
operator char* () { return chr; }
};
union MOT_VEC {
struct { float x, y, z; };
float v[3];
};
union MOT_QUAT {
struct { float x, y, z, w; };
float v[4];
};
struct MOT_TRACK {
MOT_VEC vmin;
MOT_VEC vmax;
uint8_t srcMask;
uint8_t dataMask;
uint8_t reserved[6];
};
struct MOT_NODE {
MOT_STRING name;
uint32_t offs[3];
uint8_t xord;
uint8_t rord;
uint8_t reserved[2];
MOT_TRACK trk[3];
};
struct MOT_CLIP {
char sig[4];
uint32_t size;
float rate;
uint32_t nfrm;
uint32_t nnod;
uint32_t hash;
uint32_t eval;
uint32_t seq;
MOT_STRING name;
MOT_NODE nodes[1];
};
MOT_QUAT motQuatExp(MOT_VEC v) {
MOT_QUAT q = {};
float ha = sqrtf(v.x*v.x + v.y*v.y + v.z*v.z);
float s = fabsf(ha) < 1.0e-4f ? 1.0f : sinf(ha) / ha;
q.x = v.x * s;
q.y = v.y * s;
q.z = v.z * s;
q.w = cosf(ha);
s = 1.0f / sqrtf(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w);
q.x *= s;
q.y *= s;
q.z *= s;
q.w *= s;
return q;
}
bool motCkNodeIdx(const MOT_CLIP* pClip, int nodeIdx) { return pClip && ((uint32_t)nodeIdx < pClip->nnod); }
bool motCkFrameNo(const MOT_CLIP* pClip, int fno) { return pClip && ((uint32_t)fno < pClip->nfrm); }
float* motGetTrackData(const MOT_CLIP* pClip, int nodeIdx, E_MOT_TRK trk) {
float* p = nullptr;
if (pClip && motCkNodeIdx(pClip, nodeIdx)) {
int itrk = (int)trk;
if (itrk < 3) {
uint32_t offs = pClip->nodes[nodeIdx].offs[itrk];
if (offs) {
char* pTop = (char*)pClip;
p = (float*)&pTop[offs];
}
}
}
return p;
}
void motGetChanData(const MOT_CLIP* pClip, int nodeIdx, E_MOT_TRK trk, int chIdx, float** ppData, int* pStride) {
int stride = 0;
float* p = nullptr;
if ((uint32_t)chIdx < 3) {
float* pTrk = motGetTrackData(pClip, nodeIdx, trk);
if (pTrk) {
int dataMask = pClip->nodes[nodeIdx].trk[(int)trk].dataMask;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) {
p = pTrk + stride;
++stride;
}
}
}
}
if (ppData) {
*ppData = p;
}
if (pStride) {
*pStride = stride;
}
}
MOT_VEC motGetVec(const MOT_CLIP* pClip, int nodeIdx, int fno, E_MOT_TRK trk) {
MOT_VEC v = {};
if (pClip && motCkNodeIdx(pClip, nodeIdx) && motCkFrameNo(pClip, fno)) {
float* p = motGetTrackData(pClip, nodeIdx, trk);
if (p) {
int itrk = (int)trk;
if (itrk < 3) {
float defVal = trk == SCL ? 1.0f : 0.0f;
int dataMask = pClip->nodes[nodeIdx].trk[itrk].dataMask;
int srcMask = pClip->nodes[nodeIdx].trk[itrk].srcMask;
int vsize = 0;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) ++vsize;
}
p += fno * vsize;
for (int i = 0; i < 3; ++i) {
if (dataMask & (1 << i)) {
v.v[i] = *p++;
} else if (srcMask & (1 << i)) {
v.v[i] = pClip->nodes[nodeIdx].trk[itrk].vmin.v[i];
} else {
v.v[i] = defVal;
}
}
}
}
}
return v;
}
MOT_QUAT motGetQuat(const MOT_CLIP* pClip, int nodeIdx, int fno) {
return motQuatExp(motGetVec(pClip, nodeIdx, fno, ROT));
}
MOT_CLIP* motClipLoad(const char* pPath) {
MOT_CLIP* pClip = nullptr;
FILE* f = fopen(pPath, "rb");
if (f) {
long len = 0;
if (0 == fseek(f, 0, SEEK_END)) {
len = ftell(f);
}
fseek(f, 0, SEEK_SET);
if (len) {
pClip = (MOT_CLIP*)malloc(len);
if (pClip) {
fread(pClip, len, 1, f);
}
}
fclose(f);
}
return pClip;
}
void motClipUnload(MOT_CLIP* pClip) {
if (pClip) { free(pClip); }
}
//---------------------------------------------------------------
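// Naive O(n^2) real DFT: writes hn cosine coefficients (pRe) followed by hn sine
// coefficients (pIm) into pDst. 'stride' selects one interleaved channel of the source,
// odd input lengths are padded to even by wrapping (j % nsrc), and pRe/pIm are scaled
// so that RDFT_inv re-synthesizes the original samples (see the optional check in
// cMotion::load).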
void RDFT_fwd(float* pDst, const float* pSrc, int nsrc, int stride) {
int n = (nsrc & 1) ? nsrc + 1 : nsrc;
int hn = n / 2;
float* pRe = pDst;
float* pIm = pDst + hn;
float nrm = 1.0f / (float)hn;
float s = atanf(1.0f) * 8.0f / (float)n;
for (int i = 0; i < n; ++i) {
pDst[i] = 0.0f;
}
for (int i = 0; i < hn; ++i) {
for (int j = 0; j < n; ++j) {
int idx = (j % nsrc) * stride;
float val = pSrc[idx];
float t = s * (float)i * (float)j;
pRe[i] += val * cosf(t);
pIm[i] -= val * sinf(t);
}
}
for (int i = 0; i < hn; ++i) {
pRe[i] *= nrm;
}
pRe[0] /= 2;
pRe[hn - 1] /= 2;
for (int i = 0; i < hn; ++i) {
pIm[i] *= -nrm;
}
}
void RDFT_inv(float* pDst, const float* pSrc, int ndst) {
int n = (ndst & 1) ? ndst + 1 : ndst;
int hn = n / 2;
const float* pRe = pSrc;
const float* pIm = pSrc + hn;
float s = atanf(1.0f) * 8.0f / (float)n;
for (int i = 0; i < ndst; ++i) {
pDst[i] = 0.0f;
}
for (int i = 0; i < ndst; ++i) {
float t = s * (float)i;
for (int j = 0; j < hn; ++j) {
float re = pRe[j];
float im = pIm[j];
float r = t * (float)j;
pDst[i] += re*cosf(r) + im*sinf(r);
}
}
}
class cMotion {
public:
struct ROT_CHANNEL {
float* pData;
int stride;
int nodeId;
int chId;
float* pCoefs;
int cut;
};
protected:
MOT_CLIP* mpClip;
int mPosVecsNum;
MOT_VEC* mpPosVecs;
int mRotChansNum;
ROT_CHANNEL* mpRotChans;
int mCoefsNum;
float mParamFactor;
int mCut;
float* mpEvalCoefsCPU;
float* mpEvalCoefsDev;
float* mpEvalResCPU;
float* mpEvalResDev;
cudaEvent_t mEvt;
void eval_pos_vecs(float frame);
public:
cMotion()
:
mpClip(nullptr),
mPosVecsNum(0), mpPosVecs(nullptr),
mRotChansNum(0), mpRotChans(nullptr),
mCoefsNum(0), mParamFactor(0.0f), mCut(0),
mpEvalCoefsCPU(nullptr), mpEvalCoefsDev(nullptr),
mpEvalResCPU(nullptr), mpEvalResDev(nullptr), mEvt(nullptr)
{}
~cMotion() {
unload();
}
void load(const char* pPath);
void unload();
float frame_to_param(float frame) {
if (!mpClip) return 0.0f;
float n = (float)mpClip->nfrm;
float f = ::fmodf(::fabsf(frame), n);
return f * mParamFactor;
}
int get_nfrm() const { return mpClip ? mpClip->nfrm : 0; }
int get_nrot() const { return mRotChansNum; }
int get_npos() const { return mPosVecsNum; }
int get_ncut() const { return mCut; }
float* get_res_ptr() { return mpEvalResCPU; }
void clear_res() {
if (mpEvalResCPU) {
::memset(mpEvalResCPU, 0, mRotChansNum * sizeof(float));
}
}
void eval_cpu(float frame);
void eval_dev(float frame);
};
void cMotion::load(const char* pPath) {
mpClip = motClipLoad(pPath);
if (!mpClip) return;
int numNodes = mpClip->nnod;
mPosVecsNum = 0;
for (int i = 0; i < numNodes; ++i) {
int srcMask = mpClip->nodes[i].trk[POS].srcMask;
int dataMask = mpClip->nodes[i].trk[POS].dataMask;
if (srcMask || dataMask) {
++mPosVecsNum;
}
}
if (mPosVecsNum > 0) {
mpPosVecs = (MOT_VEC*)::malloc(mPosVecsNum * sizeof(MOT_VEC));
}
mRotChansNum = 0;
for (int i = 0; i < numNodes; ++i) {
int dataMask = mpClip->nodes[i].trk[ROT].dataMask;
for (int j = 0; j < 3; ++j) {
if (dataMask & (1 << j)) {
++mRotChansNum;
}
}
}
if (mRotChansNum > 0) {
mpRotChans = (ROT_CHANNEL*)::malloc(mRotChansNum * sizeof(ROT_CHANNEL));
}
if (!mpRotChans) return;
int chIdx = 0;
for (int i = 0; i < numNodes; ++i) {
int dataMask = mpClip->nodes[i].trk[ROT].dataMask;
for (int j = 0; j < 3; ++j) {
if (dataMask & (1 << j)) {
ROT_CHANNEL* pCh = &mpRotChans[chIdx++];
pCh->nodeId = i;
pCh->chId = j;
motGetChanData(mpClip, i, ROT, j, &pCh->pData, &pCh->stride);
}
}
}
int nfrm = mpClip->nfrm;
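// Optional round-trip check (disabled): flip the 0 below to 1 to re-synthesize each
// channel with RDFT_inv and print the per-frame reconstruction error.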
float* pTmp = 0 ? (float*)::malloc(nfrm * sizeof(float)) : nullptr;
int ncoef = nfrm;
if (ncoef & 1) ++ncoef;
mCoefsNum = ncoef;
mParamFactor = ::atanf(1.0f) * 8.0f / (float)ncoef;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
pCh->pCoefs = (float*)::malloc(ncoef * sizeof(float));
if (pCh->pCoefs) {
RDFT_fwd(pCh->pCoefs, pCh->pData, nfrm, pCh->stride);
if (pTmp) {
char* pNodeName = mpClip->nodes[pCh->nodeId].name;
RDFT_inv(pTmp, pCh->pCoefs, nfrm);
::printf("-- [%d] %s:%c\n", i, pNodeName, "xyz"[pCh->chId]);
for (int k = 0; k < nfrm; ++k) {
float ref = pCh->pData[k*pCh->stride];
float val = pTmp[k];
::printf("[%d]: %.4f - %.4f = %f\n", k, ref, val, ref - val);
}
}
}
}
if (pTmp) {
::free(pTmp);
pTmp = nullptr;
}
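// Estimate how many Fourier coefficients each rotation channel really needs: the
// per-channel cut is the index of the first real coefficient whose magnitude quantizes
// to zero at a step of 0.0005 (defaulting to all hn coefficients), and the shared
// evaluation cut below is 75% of the largest per-channel cut.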
int minCut = nfrm + 1;
int maxCut = 0;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
pCh->cut = (nfrm & (~1)) / 2;
float* pRe = pCh->pCoefs;
if (pRe) {
const float qs = 0.0005f;
for (int j = 0; j < ncoef / 2; ++j) {
float x = pRe[j];
float qx = ::floorf(::fabsf(x) / qs) * qs * (x < 0.0f ? -1.0f : 1.0f);
if (qx == 0) {
pCh->cut = j;
break;
}
}
}
if (pCh->cut < minCut) {
minCut = pCh->cut;
}
if (pCh->cut > maxCut) {
maxCut = pCh->cut;
}
}
mCut = (int)((float)maxCut * 0.75f);
::printf("coefs cut: %d .. %d -> %d\n", minCut, maxCut, mCut);
size_t evalCoefsSize = mRotChansNum * (mCut + mCut - 1) * sizeof(float);
cudaMallocHost(&mpEvalCoefsCPU, evalCoefsSize);
if (!mpEvalCoefsCPU) return;
float* pCoefs = mpEvalCoefsCPU;
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
if (pCh->pCoefs) {
*pCoefs++ = pCh->pCoefs[0];
for (int j = 1; j < mCut; ++j) {
*pCoefs++ = pCh->pCoefs[j];
}
for (int j = 1; j < mCut; ++j) {
*pCoefs++ = pCh->pCoefs[j + (mCoefsNum / 2)];
}
}
}
cudaMalloc(&mpEvalCoefsDev, evalCoefsSize);
if (mpEvalCoefsDev) {
cudaMemcpy(mpEvalCoefsDev, mpEvalCoefsCPU, evalCoefsSize, cudaMemcpyHostToDevice);
}
size_t evalResSize = mRotChansNum * sizeof(float);
cudaMallocHost(&mpEvalResCPU, evalResSize);
clear_res();
cudaMalloc(&mpEvalResDev, evalResSize);
cudaEventCreateWithFlags(&mEvt, cudaEventDisableTiming);
}
void cMotion::unload() {
cudaDeviceSynchronize();
if (mEvt) {
cudaEventDestroy(mEvt);
mEvt = nullptr;
}
if (mpEvalResDev) {
cudaFree(mpEvalResDev);
mpEvalResDev = nullptr;
}
if (mpEvalResCPU) {
cudaFreeHost(mpEvalResCPU);
mpEvalResCPU = nullptr;
}
if (mpEvalCoefsDev) {
cudaFree(mpEvalCoefsDev);
mpEvalCoefsDev = nullptr;
}
if (mpEvalCoefsCPU) {
cudaFreeHost(mpEvalCoefsCPU);
mpEvalCoefsCPU = nullptr;
}
if (mpRotChans) {
for (int i = 0; i < mRotChansNum; ++i) {
ROT_CHANNEL* pCh = &mpRotChans[i];
if (pCh->pCoefs) {
::free(pCh->pCoefs);
pCh->pCoefs = nullptr;
}
}
::free(mpRotChans);
mpRotChans = nullptr;
mRotChansNum = 0;
}
if (mpPosVecs) {
::free(mpPosVecs);
mpPosVecs = nullptr;
mPosVecsNum = 0;
}
if (mpClip) {
motClipUnload(mpClip);
mpClip = nullptr;
}
mCut = 0;
mCoefsNum = 0;
mParamFactor = 0.0f;
}
void cMotion::eval_pos_vecs(float frame) {
if (!mpClip) return;
if (!mpPosVecs) return;
int nfrm = mpClip->nfrm;
int numNodes = mpClip->nnod;
int idx = 0;
for (int i = 0; i < numNodes; ++i) {
int srcMask = mpClip->nodes[i].trk[POS].srcMask;
int dataMask = mpClip->nodes[i].trk[POS].dataMask;
if (srcMask || dataMask) {
int fno = (int)frame;
MOT_VEC v = motGetVec(mpClip, i, fno, POS);
if (fno < nfrm - 1) {
float t = frame - (float)fno;
MOT_VEC v1 = motGetVec(mpClip, i, fno + 1, POS);
for (int j = 0; j < 3; ++j) {
v.v[j] += (v1.v[j] - v.v[j]) * t;
}
}
mpPosVecs[idx] = v;
++idx;
}
}
}
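// eval_sub() reconstructs one rotation channel at parameter t from its truncated
// Fourier series (n = number of retained coefficients). The #if 0 branch is the direct
// form calling cosf/sinf per term; the #else branch uses a trig recurrence instead.
// The __host__ __device__ qualifiers apply to whichever definition survives the
// preprocessor, so the same code runs in eval_cpu() and in the CUDA kernel.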
__host__ __device__
#if 0
void eval_sub(float* pRes, const float* pCoefs, float t, int n, int tid) {
const float* pRe = &pCoefs[tid * (n + n - 1)];
const float* pIm = &pRe[n];
float res = pRe[0];
for (int i = 1; i < n; ++i) {
float r = t * (float)i;
float re = pRe[i];
float im = pIm[i - 1];
res += re*cosf(r) + im*sinf(r);
}
pRes[tid] = res;
}
#else
// NR ed3: (5.4.6)
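// Trig recurrence from Numerical Recipes (3rd ed., eq. 5.4.6): with a = 2*sin^2(t/2)
// and b = sin(t), cos((k+1)t) = cos(kt) - (a*cos(kt) + b*sin(kt)) and
// sin((k+1)t) = sin(kt) - (a*sin(kt) - b*cos(kt)), so only a few sinf/cosf calls are
// needed per channel instead of one pair per coefficient.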
void eval_sub(float* pRes, const float* pCoefs, float t, int n, int tid) {
const float* pRe = &pCoefs[tid * (n + n - 1)];
const float* pIm = &pRe[n];
float res = pRe[0];
float r = t;
float c = cosf(r);
float s = sinf(r);
float a = sinf(r*0.5f);
a = 2.0f * a*a;
float b = s;
float re = pRe[1];
float im = pIm[0];
res += re*c + im*s;
for (int i = 2; i < n; ++i) {
float ci = c - (a*c + b*s);
float si = s - (a*s - b*c);
re = pRe[i];
im = pIm[i - 1];
res += re*ci + im*si;
c = ci;
s = si;
}
pRes[tid] = res;
}
#endif
void cMotion::eval_cpu(float frame) {
float* pCoefs = mpEvalCoefsCPU;
float* pRes = mpEvalResCPU;
if (!pCoefs || !pRes) return;
float t = frame_to_param(frame);
int n = mRotChansNum;
//#pragma omp parallel for
for (int i = 0; i < n; ++i) {
eval_sub(pRes, pCoefs, t, mCut, i);
}
eval_pos_vecs(frame);
}
__global__ void eval_kernel(float* pRes, const float* pCoefs, float t, int n, int nres) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= nres) return;
eval_sub(pRes, pCoefs, t, n, tid);
}
static int s_blkMin = 64;
static int s_blkMax = 128;
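// Pick a CUDA block size of roughly nwk/2 rounded down to a power of two,
// clamped to [s_blkMin, s_blkMax].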
static int calc_thr_num(int nwk) {
int n = (int)::log2(nwk) - 1;
if (n < 0) n = 0;
n = 1 << n;
if (n < s_blkMin) n = s_blkMin;
if (n > s_blkMax) n = s_blkMax;
return n;
}
void cMotion::eval_dev(float frame) {
float* pCoefs = mpEvalCoefsDev;
float* pRes = mpEvalResDev;
if (!pCoefs || !pRes) return;
float t = frame_to_param(frame);
int nch = mRotChansNum;
int nthr = calc_thr_num(nch);
int nblk = (nch + nthr - 1) / nthr;
eval_kernel<<<nblk, nthr, 0, 0>>>(pRes, pCoefs, t, mCut, nch);
cudaMemcpyAsync(mpEvalResCPU, pRes, mRotChansNum * sizeof(float), cudaMemcpyDeviceToHost, 0);
cudaEventRecord(mEvt, 0);
eval_pos_vecs(frame);
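// eval_pos_vecs() above ran on the CPU while the kernel and the async D2H copy were in
// flight; now spin on the event until the rotation results have landed in the pinned
// host buffer.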
while (cudaEventQuery(mEvt) == cudaErrorNotReady) {}
}
static cMotion s_mot;
void init(const char* pClipPath) {
cudaDeviceProp devProps;
cudaGetDeviceProperties(&devProps, 0);
s_blkMax = devProps.maxThreadsPerBlock / 8;
::printf("device: %s, compute %d.%d\n", devProps.name, devProps.major, devProps.minor);
::printf("SM count = %d\n", devProps.multiProcessorCount);
::printf("max thr/SM = %d\n", devProps.maxThreadsPerMultiProcessor);
::printf("max thr/blk = %d\n", devProps.maxThreadsPerBlock);
::printf("concurrent exec = %s\n", devProps.concurrentKernels ? "yes" : "no");
::printf("\n");
s_mot.load(pClipPath);
::printf("#rot chans = %d\n", s_mot.get_nrot());
::printf("#pos vecs = %d\n", s_mot.get_npos());
}
double res_l2() {
double res = 0;
int n = s_mot.get_nrot();
float* p = s_mot.get_res_ptr();
if (p) {
for (int i = 0; i < n; ++i) {
res += p[i] * p[i];
}
res = sqrt(res);
}
return res;
}
int main(int argc, char* argv[]) {
if (argc < 2) {
return -1;
}
init(argv[1]);
const int N = 1000;
double cpuT = 0.0;
double devT = 0.0;
double devRes = 0.0;
double cpuRes = 0.0;
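// NOTE: the timings below are raw QueryPerformanceCounter ticks, not seconds;
// only the final cpu/dev ratio printed at the end is unit-independent.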
::printf("-----\n");
for (int i = 0; i < N; ++i) {
float frm = s_mot.get_nfrm() * float(i) / float(N);
int64_t devT0 = timestamp();
s_mot.eval_dev(frm);
int64_t devT1 = timestamp();
double devDT = (double)(devT1 - devT0);
devT += devDT;
devRes += res_l2();
}
devT /= N;
::printf("dev res = %.1f\n", devRes);
::printf("dev t = %.1f\n", devT);
s_mot.clear_res();
for (int i = 0; i < N; ++i) {
float frm = s_mot.get_nfrm() * float(i) / float(N);
int64_t cpuT0 = timestamp();
s_mot.eval_cpu(frm);
int64_t cpuT1 = timestamp();
double cpuDT = (double)(cpuT1 - cpuT0);
cpuT += cpuDT;
cpuRes += res_l2();
}
cpuT /= N;
::printf("cpu res = %.1f\n", cpuRes);
::printf("cpu t = %.1f\n", cpuT);
::printf("%f\n", cpuT / devT);
return 0;
}
|
f921e04c9c55e74013afad8cf23e251d0b2b6502.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif//_OPENMP
#include "algorithm.h"
#include "../geometries/geometries.h"
const double lx = 2*M_PI;
const double ly = 2*M_PI;
const double lz = 1.;
dg::bc bcx = dg::PER;
dg::bc bcy = dg::PER;
dg::bc bcz = dg::PER;
double left( double x, double y, double z) {return sin(x)*cos(y)*z;}
double right( double x, double y, double z) {return cos(x)*sin(y)*z;}
//double right2( double x, double y) {return sin(y);}
double jacobian( double x, double y, double z)
{
return z*z*cos(x)*sin(y)*2*sin(2*x)*cos(2*y)-sin(x)*cos(y)*2*cos(2*x)*sin(2*y);
}
const double R_0 = 1000;
double fct(double x, double y, double z){ return sin(x-R_0)*sin(y);}
double derivative( double x, double y, double z){return cos(x-R_0)*sin(y);}
double laplace_fct( double x, double y, double z) { return -1./x*cos(x-R_0)*sin(y) + 2.*sin(y)*sin(x-R_0);}
double initial( double x, double y, double z) {return sin(0);}
typedef dg::MDMatrix Matrix;
typedef dg::MIDMatrix IMatrix;
typedef dg::MDVec Vector;
/*******************************************************************************
program expects npx, npy, npz, n, Nx, Ny, Nz from std::cin
outputs one line to std::cout
# npx npy npz #procs #threads n Nx Ny Nz t_AXPBY t_DOT t_DX t_DY t_DZ t_ARAKAWA #iterations t_1xELLIPTIC_CG t_DS
if Nz == 1, ds is not executed
Run with:
>$ echo npx npy npz n Nx Ny Nz | mpirun -#procs ./cluster_mpib
*******************************************************************************/
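// Example invocation (hypothetical sizes, assuming 4 MPI ranks):
// echo 2 2 1 3 48 48 8 | mpirun -n 4 ./cluster_mpib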
int main(int argc, char* argv[])
{
MPI_Init( &argc, &argv);
unsigned n, Nx, Ny, Nz;
int periods[3] = {false,false, false};
if( bcx == dg::PER) periods[0] = true;
if( bcy == dg::PER) periods[1] = true;
if( bcz == dg::PER) periods[2] = true;
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank);
MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
int num_devices=0;
hipGetDeviceCount(&num_devices);
if(num_devices == 0)
{
std::cerr << "No CUDA capable devices found"<<std::endl;
return -1;
}
int device = rank % num_devices; //assume # of gpus/node is fixed
hipSetDevice( device);
#endif//cuda
int np[3];
if( rank == 0)
{
std::cin >> np[0] >> np[1]>>np[2];
std::cout<< "xxx "<<np[0] <<" "<<np[1]<<" "<<np[2]<<" "<<size<<" ";
assert( size == np[0]*np[1]*np[2]);
}
MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm comm;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm);
int num_threads = 1;
#ifdef _OPENMP
num_threads = omp_get_max_threads( );
#endif //omp
if(rank==0)std::cout << num_threads<<" ";
if( rank == 0)
{
std::cin >> n >> Nx >> Ny >> Nz;
std::cout<< n <<" "<<Nx<<" "<<Ny<<" "<<Nz<<" ";
}
MPI_Bcast( &n,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Nx,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Ny,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Nz,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
////////////////////////////////////////////////////////////////
dg::CartesianMPIGrid3d grid( 0, lx, 0, ly, 0,lz, n, Nx, Ny, Nz, bcx, bcy, dg::PER, comm);
dg::Timer t;
Vector w3d = dg::create::weights( grid);
Vector lhs = dg::evaluate ( left, grid), jac(lhs);
Vector rhs = dg::evaluate ( right,grid);
const Vector sol = dg::evaluate( jacobian, grid );
Vector eins = dg::evaluate( dg::one, grid );
std::cout<< std::setprecision(6);
unsigned multi=20;
//AXPBY
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas1::axpby( 1., lhs, -1., jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//DOT
t.tic();
double norm;
for( unsigned i=0; i<multi; i++)
norm = dg::blas2::dot( w3d, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
norm++;//avoid compiler warning
//Matrix-Vector product
Matrix dx = dg::create::dx( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dx, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//Matrix-Vector product
Matrix dy = dg::create::dy( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dy, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
if( Nz > 2)
{
//Matrix-Vector product
Matrix dz = dg::create::dz( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dz, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
}
//The Arakawa scheme
dg::ArakawaX<dg::CartesianMPIGrid3d, Matrix, Vector> arakawa( grid);
t.tic();
for( unsigned i=0; i<multi; i++)
arakawa( lhs, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//The Elliptic scheme
periods[0] = false, periods[1] = false;
MPI_Comm commEll;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &commEll);
dg::CylindricalMPIGrid3d gridEll( R_0, R_0+lx, 0., ly, 0.,lz, n, Nx, Ny,Nz, dg::DIR, dg::DIR, dg::PER, commEll);
const Vector ellw3d = dg::create::volume(gridEll);
const Vector ellv3d = dg::create::inv_volume(gridEll);
dg::Elliptic<dg::CylindricalMPIGrid3d, Matrix, Vector> laplace(gridEll, dg::not_normed, dg::centered);
const Vector solution = dg::evaluate ( fct, gridEll);
const Vector deriv = dg::evaluate( derivative, gridEll);
Vector x = dg::evaluate( initial, gridEll);
Vector b = dg::evaluate ( laplace_fct, gridEll);
dg::blas2::symv( ellw3d, b, b);
dg::CG< Vector > pcg( x, n*n*Nx*Ny);
t.tic();
unsigned number = pcg(laplace, x, b, ellv3d, 1e-6);
t.toc();
if(rank==0)std::cout << number << " "<<t.diff()/(double)number<<" "<<std::flush;
if( Nz > 1)
{
//Application of ds
double gpR0 = 10, gpI0=20;
double inv_aspect_ratio = 0.1;
double gpa = gpR0*inv_aspect_ratio;
double Rmin=gpR0-1.0*gpa;
double Zmin=-1.0*gpa*1.00;
double Rmax=gpR0+1.0*gpa;
double Zmax=1.0*gpa*1.00;
dg::CylindricalMPIGrid3d g3d( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, n, Nx ,Ny, Nz,dg::DIR, dg::DIR, dg::PER,commEll);
dg::geo::TokamakMagneticField magfield = dg::geo::createGuentherField(gpR0, gpI0);
dg::geo::Fieldaligned<dg::aProductMPIGeometry3d, IMatrix, Vector> dsFA( magfield, g3d, dg::NEU, dg::NEU, dg::geo::FullLimiter());
dg::geo::DS<dg::aProductMPIGeometry3d, IMatrix, Matrix, Vector> ds ( dsFA, dg::not_normed, dg::centered);
dg::geo::guenther::FuncNeu funcNEU(gpR0,gpI0);
Vector function = dg::evaluate( funcNEU, g3d) , dsTdsfb(function);
t.tic();
for( unsigned i=0; i<multi; i++)
ds.symv(function,dsTdsfb);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
}
if(rank==0)std::cout <<" XXX"<<std::endl;
MPI_Finalize();
return 0;
}
| f921e04c9c55e74013afad8cf23e251d0b2b6502.cu | #include <iostream>
#include <iomanip>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif//_OPENMP
#include "algorithm.h"
#include "../geometries/geometries.h"
const double lx = 2*M_PI;
const double ly = 2*M_PI;
const double lz = 1.;
dg::bc bcx = dg::PER;
dg::bc bcy = dg::PER;
dg::bc bcz = dg::PER;
double left( double x, double y, double z) {return sin(x)*cos(y)*z;}
double right( double x, double y, double z) {return cos(x)*sin(y)*z;}
//double right2( double x, double y) {return sin(y);}
double jacobian( double x, double y, double z)
{
return z*z*cos(x)*sin(y)*2*sin(2*x)*cos(2*y)-sin(x)*cos(y)*2*cos(2*x)*sin(2*y);
}
const double R_0 = 1000;
double fct(double x, double y, double z){ return sin(x-R_0)*sin(y);}
double derivative( double x, double y, double z){return cos(x-R_0)*sin(y);}
double laplace_fct( double x, double y, double z) { return -1./x*cos(x-R_0)*sin(y) + 2.*sin(y)*sin(x-R_0);}
double initial( double x, double y, double z) {return sin(0);}
typedef dg::MDMatrix Matrix;
typedef dg::MIDMatrix IMatrix;
typedef dg::MDVec Vector;
/*******************************************************************************
program expects npx, npy, npz, n, Nx, Ny, Nz from std::cin
outputs one line to std::cout
# npx npy npz #procs #threads n Nx Ny Nz t_AXPBY t_DOT t_DX t_DY t_DZ t_ARAKAWA #iterations t_1xELLIPTIC_CG t_DS
if Nz == 1, ds is not executed
Run with:
>$ echo npx npy npz n Nx Ny Nz | mpirun -#procs ./cluster_mpib
*******************************************************************************/
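// Example invocation (hypothetical sizes, assuming 4 MPI ranks):
// echo 2 2 1 3 48 48 8 | mpirun -n 4 ./cluster_mpib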
int main(int argc, char* argv[])
{
MPI_Init( &argc, &argv);
unsigned n, Nx, Ny, Nz;
int periods[3] = {false,false, false};
if( bcx == dg::PER) periods[0] = true;
if( bcy == dg::PER) periods[1] = true;
if( bcz == dg::PER) periods[2] = true;
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank);
MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
int num_devices=0;
cudaGetDeviceCount(&num_devices);
if(num_devices == 0)
{
std::cerr << "No CUDA capable devices found"<<std::endl;
return -1;
}
int device = rank % num_devices; //assume # of gpus/node is fixed
cudaSetDevice( device);
#endif//cuda
int np[3];
if( rank == 0)
{
std::cin >> np[0] >> np[1]>>np[2];
std::cout<< "xxx "<<np[0] <<" "<<np[1]<<" "<<np[2]<<" "<<size<<" ";
assert( size == np[0]*np[1]*np[2]);
}
MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm comm;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm);
int num_threads = 1;
#ifdef _OPENMP
num_threads = omp_get_max_threads( );
#endif //omp
if(rank==0)std::cout << num_threads<<" ";
if( rank == 0)
{
std::cin >> n >> Nx >> Ny >> Nz;
std::cout<< n <<" "<<Nx<<" "<<Ny<<" "<<Nz<<" ";
}
MPI_Bcast( &n,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Nx,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Ny,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast( &Nz,1 , MPI_UNSIGNED, 0, MPI_COMM_WORLD);
////////////////////////////////////////////////////////////////
dg::CartesianMPIGrid3d grid( 0, lx, 0, ly, 0,lz, n, Nx, Ny, Nz, bcx, bcy, dg::PER, comm);
dg::Timer t;
Vector w3d = dg::create::weights( grid);
Vector lhs = dg::evaluate ( left, grid), jac(lhs);
Vector rhs = dg::evaluate ( right,grid);
const Vector sol = dg::evaluate( jacobian, grid );
Vector eins = dg::evaluate( dg::one, grid );
std::cout<< std::setprecision(6);
unsigned multi=20;
//AXPBY
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas1::axpby( 1., lhs, -1., jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//DOT
t.tic();
double norm;
for( unsigned i=0; i<multi; i++)
norm = dg::blas2::dot( w3d, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
norm++;//avoid compiler warning
//Matrix-Vector product
Matrix dx = dg::create::dx( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dx, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//Matrix-Vector product
Matrix dy = dg::create::dy( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dy, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
if( Nz > 2)
{
//Matrix-Vector product
Matrix dz = dg::create::dz( grid, dg::centered);
t.tic();
for( unsigned i=0; i<multi; i++)
dg::blas2::symv( dz, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
}
//The Arakawa scheme
dg::ArakawaX<dg::CartesianMPIGrid3d, Matrix, Vector> arakawa( grid);
t.tic();
for( unsigned i=0; i<multi; i++)
arakawa( lhs, rhs, jac);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
//The Elliptic scheme
periods[0] = false, periods[1] = false;
MPI_Comm commEll;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &commEll);
dg::CylindricalMPIGrid3d gridEll( R_0, R_0+lx, 0., ly, 0.,lz, n, Nx, Ny,Nz, dg::DIR, dg::DIR, dg::PER, commEll);
const Vector ellw3d = dg::create::volume(gridEll);
const Vector ellv3d = dg::create::inv_volume(gridEll);
dg::Elliptic<dg::CylindricalMPIGrid3d, Matrix, Vector> laplace(gridEll, dg::not_normed, dg::centered);
const Vector solution = dg::evaluate ( fct, gridEll);
const Vector deriv = dg::evaluate( derivative, gridEll);
Vector x = dg::evaluate( initial, gridEll);
Vector b = dg::evaluate ( laplace_fct, gridEll);
dg::blas2::symv( ellw3d, b, b);
dg::CG< Vector > pcg( x, n*n*Nx*Ny);
t.tic();
unsigned number = pcg(laplace, x, b, ellv3d, 1e-6);
t.toc();
if(rank==0)std::cout << number << " "<<t.diff()/(double)number<<" "<<std::flush;
if( Nz > 1)
{
//Application of ds
double gpR0 = 10, gpI0=20;
double inv_aspect_ratio = 0.1;
double gpa = gpR0*inv_aspect_ratio;
double Rmin=gpR0-1.0*gpa;
double Zmin=-1.0*gpa*1.00;
double Rmax=gpR0+1.0*gpa;
double Zmax=1.0*gpa*1.00;
dg::CylindricalMPIGrid3d g3d( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, n, Nx ,Ny, Nz,dg::DIR, dg::DIR, dg::PER,commEll);
dg::geo::TokamakMagneticField magfield = dg::geo::createGuentherField(gpR0, gpI0);
dg::geo::Fieldaligned<dg::aProductMPIGeometry3d, IMatrix, Vector> dsFA( magfield, g3d, dg::NEU, dg::NEU, dg::geo::FullLimiter());
dg::geo::DS<dg::aProductMPIGeometry3d, IMatrix, Matrix, Vector> ds ( dsFA, dg::not_normed, dg::centered);
dg::geo::guenther::FuncNeu funcNEU(gpR0,gpI0);
Vector function = dg::evaluate( funcNEU, g3d) , dsTdsfb(function);
t.tic();
for( unsigned i=0; i<multi; i++)
ds.symv(function,dsTdsfb);
t.toc();
if(rank==0)std::cout<<t.diff()/(double)multi<<" ";
}
if(rank==0)std::cout <<" XXX"<<std::endl;
MPI_Finalize();
return 0;
}
|
01f524f5780770588870a792d408616807cf53f1.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxNaming.h"
#include <hip/hip_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == hipSuccess)
int main(void)
{
auto nvtxDomain = nvtxDomainCreateA("my-domain");
// Create allocation and label it "My allocation".
void* dptr;
checkCudaErrors(hipMalloc(&dptr, 1));
NV::LabelMemory(nvtxDomain, dptr, "My allocation");
// Leak 1 uninitialized byte
checkCudaErrors(hipDeviceReset());
}
| 01f524f5780770588870a792d408616807cf53f1.cu | /* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxNaming.h"
#include <cuda_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == cudaSuccess)
int main(void)
{
auto nvtxDomain = nvtxDomainCreateA("my-domain");
// Create allocation and label it "My allocation".
void* dptr;
checkCudaErrors(cudaMalloc(&dptr, 1));
NV::LabelMemory(nvtxDomain, dptr, "My allocation");
// Leak 1 uninitialized byte
checkCudaErrors(cudaDeviceReset());
}
|
96de3872d1a9e8a0ca8428ae1c9f3149a7ba432e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
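// Each ROI is encoded as 6 floats: (batch_index, center_x, center_y, width, height,
// angle in degrees); the spatial coordinates are scaled by spatial_scale inside the
// kernels and the sampling grid is rotated by the angle about the box center.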
template <typename T>
__global__ void RoIAlignRotatedForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated_forward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated_backward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
| 96de3872d1a9e8a0ca8428ae1c9f3149a7ba432e.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
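// Each ROI is encoded as 6 floats: (batch_index, center_x, center_y, width, height,
// angle in degrees); the spatial coordinates are scaled by spatial_scale inside the
// kernels and the sampling grid is rotated by the angle about the box center.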
template <typename T>
__global__ void RoIAlignRotatedForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated_forward", [&] {
RoIAlignRotatedForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated_backward", [&] {
RoIAlignRotatedBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
eb7e11beb3ed98b9aad467973e9112e19cf2504b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t DSIZE = 16384; // matrix side dimension
const int block_size = 256; // CUDA maximum is 1024
// matrix row-sum kernel
// we will assign one block per row
__global__ void row_sums(const float *A, float *sums, size_t ds){
int idx = blockIdx.x; // our block index becomes our row indicator
if (idx < ds){
__shared__ float sdata[block_size];
int tid = threadIdx.x;
sdata[tid] = 0.0f;
size_t tidx = tid;
while (tidx < ds) { // block stride loop to load data
sdata[tid] += A[idx*ds+tidx];
tidx += blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0) sums[idx] = sdata[0];
}
}
// matrix column-sum kernel
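// (one thread per column: consecutive threads read consecutive addresses A[idx + ds*i],
// so the global loads coalesce even though each thread walks down an entire column)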
__global__ void column_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx+ds*i]; // write a for loop that will cause the thread to iterate down a column, keeeping a running sum, and write the result to sums
sums[idx] = sum;
}}
bool validate(float *data, size_t sz){
for (size_t i = 0; i < sz; i++)
if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;}
return true;
}
int main(){
float *h_A, *h_sums, *d_A, *d_sums;
h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory
h_sums = new float[DSIZE]();
for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A
hipMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums
cudaCheckErrors("hipMalloc failure"); // error checking
// copy matrix A to device:
hipMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
//cuda processing sequence step 1 is complete
hipLaunchKernelGGL(( row_sums), dim3(DSIZE), dim3(block_size), 0, 0, d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums correct!\n");
hipMemset(d_sums, 0, DSIZE*sizeof(float));
hipLaunchKernelGGL(( column_sums), dim3((DSIZE+block_size-1)/block_size), dim3(block_size), 0, 0, d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("column sums correct!\n");
return 0;
}
| eb7e11beb3ed98b9aad467973e9112e19cf2504b.cu | #include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t DSIZE = 16384; // matrix side dimension
const int block_size = 256; // CUDA maximum is 1024
// matrix row-sum kernel
// we will assign one block per row
__global__ void row_sums(const float *A, float *sums, size_t ds){
int idx = blockIdx.x; // our block index becomes our row indicator
if (idx < ds){
__shared__ float sdata[block_size];
int tid = threadIdx.x;
sdata[tid] = 0.0f;
size_t tidx = tid;
while (tidx < ds) { // block stride loop to load data
sdata[tid] += A[idx*ds+tidx];
tidx += blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0) sums[idx] = sdata[0];
}
}
// matrix column-sum kernel
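// (one thread per column: consecutive threads read consecutive addresses A[idx + ds*i],
// so the global loads coalesce even though each thread walks down an entire column)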
__global__ void column_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx+ds*i]; // write a for loop that will cause the thread to iterate down a column, keeping a running sum, and write the result to sums
sums[idx] = sum;
}}
bool validate(float *data, size_t sz){
for (size_t i = 0; i < sz; i++)
if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;}
return true;
}
int main(){
float *h_A, *h_sums, *d_A, *d_sums;
h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory
h_sums = new float[DSIZE]();
for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A
cudaMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums
cudaCheckErrors("cudaMalloc failure"); // error checking
// copy matrix A to device:
cudaMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
//cuda processing sequence step 1 is complete
row_sums<<<DSIZE, block_size>>>(d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
  cudaCheckErrors("kernel execution failure or cudaMemcpy D2H failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums correct!\n");
cudaMemset(d_sums, 0, DSIZE*sizeof(float));
column_sums<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
  cudaCheckErrors("kernel execution failure or cudaMemcpy D2H failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("column sums correct!\n");
return 0;
}
|
2931f5fafab0b2d8c09aaa5a3709bf2cca84b117.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ void _sum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _sum_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sum_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _sum_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sum_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // multiply the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float prod_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _prod_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _prod_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // multiply the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double prod_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _prod_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _prod_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // take the maximum of the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float maximum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _maximum_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _maximum_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // take the maximum of the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double maximum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _maximum_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _maximum_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // take the minimum of the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float minimum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _minimum_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _minimum_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // take the minimum of the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double minimum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _minimum_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _minimum_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _sumabs_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sumabs_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _sumabs_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sumabs_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs2_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _sumabs2_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sumabs2_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs2_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _sumabs2_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _sumabs2_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float countnz_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
hipLaunchKernelGGL(( _countnz_32_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _countnz_32_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double countnz_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
hipLaunchKernelGGL(( _countnz_64_20_1), dim3(64),dim3(64), 0, 0, n,x,y);
hipLaunchKernelGGL(( _countnz_64_20_2), dim3(1),dim3(64), 0, 0, y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
| 2931f5fafab0b2d8c09aaa5a3709bf2cca84b117.cu | __device__ void _sum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sum_32_20_1<<<64,64>>>(n,x,y);
_sum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sum_64_20_1<<<64,64>>>(n,x,y);
_sum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // multiply the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float prod_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_prod_32_20_1<<<64,64>>>(n,x,y);
_prod_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // multiply the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double prod_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_prod_64_20_1<<<64,64>>>(n,x,y);
_prod_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // take the maximum of the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float maximum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_maximum_32_20_1<<<64,64>>>(n,x,y);
_maximum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // take the maximum of the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double maximum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_maximum_64_20_1<<<64,64>>>(n,x,y);
_maximum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
  // take the minimum of the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float minimum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_minimum_32_20_1<<<64,64>>>(n,x,y);
_minimum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
  // take the minimum of the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double minimum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_minimum_64_20_1<<<64,64>>>(n,x,y);
_minimum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sumabs_32_20_1<<<64,64>>>(n,x,y);
_sumabs_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sumabs_64_20_1<<<64,64>>>(n,x,y);
_sumabs_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs2_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sumabs2_32_20_1<<<64,64>>>(n,x,y);
_sumabs2_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs2_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sumabs2_64_20_1<<<64,64>>>(n,x,y);
_sumabs2_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float countnz_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_countnz_32_20_1<<<64,64>>>(n,x,y);
_countnz_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double countnz_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_countnz_64_20_1<<<64,64>>>(n,x,y);
_countnz_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
|
0a93b3b13bbd6be1875996b35c831d42ae8584b0.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
void chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_u_kernel_cuda
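// When the jiterator path is enabled the kernel body is supplied as source text
// (chebyshev_polynomial_u_string) and compiled at runtime on first use; otherwise
// the precompiled GPU lambda above is dispatched directly.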
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
| 0a93b3b13bbd6be1875996b35c831d42ae8584b0.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
void chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
|
24d233ab58930622cf94c9f8a4cbfc55d7796275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#define NUM_BANKS 16
#define N_ELEMENTS 16384
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
__global__ void sum_kernel(float *g_odata, float *g_idata, int n)
{
int i;
int tid = threadIdx.x; //Calculate a thread ID based on this thread's position within the block
//int tid = threadIdx.y * blockDim.x + threadIdx.x; //Another thread ID example for a 2-D thread block
//int tid = blockIdx.x * blockDim.x + threadIdx.x; //Another thread ID example for assigning unique thread IDs across
//different blocks
g_odata[0] = 0;
//A single thread adds up all 1M array elements serially.
//This is a poor use of parallel hardware - your job is to increase the number of threads, split up the work, and communicate
//data between threads as necessary to improve the kernel performance.
for(i = 0;i < N_ELEMENTS;i++)
{
g_odata[0] += g_idata[i];
}
__syncthreads(); //Syncthreads forces all threads within a block to reach this point before continuing past. Note this is
//necessary within blocks because not all threads can physically execute at the same time.
//Syncthreads does NOT synchronize different blocks (but you should not need to for this project).
}
__global__ void sum_kernel_globalMem(float *g_odata, float *g_idata)
{
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if(tid<s){
g_idata[myId] += g_idata[myId + s];
}
__syncthreads();
}
if(tid==0){
g_odata[blockIdx.x]=g_idata[myId];
}
}
__global__ void sum_kernel_sharedMem(float *g_odata, float*g_idata)
{
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
sdata[tid] = g_idata[myId];
__syncthreads();
for(unsigned int s= blockDim.x/2; s>0;s>>=1){
if(tid<s){
sdata[tid] +=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
g_odata[blockIdx.x]=sdata[0];
}
}
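// Note: runTest() below applies this kernel in two passes -- the first launch
// writes one partial sum per block into d_tdata, and a second single-block
// launch then reduces those partial sums into d_odata[0].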
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data, const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a sum (reduction) test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, char** argv)
{
hipSetDevice( cutGetMaxGflopsDeviceId() );
int num_elements = N_ELEMENTS;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
//h_data[i] = 1;
}
printf("\n");
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
float* d_tdata;//intermediate data
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_tdata, mem_size));
// copy host memory to device input array
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("Kernel execution failed");
printf("Running sum of %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 1;
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
//sum_kernel<<< grid, threads >>> (d_odata, d_idata, num_elements);
//dim3 threads(maxThrdperBlck, 1, 1);
//dim3 grid(N_ELEMENTS / threads.x, 1, 1);
int threads = 128;
int grid = num_elements/threads;
//sum_kernel_globalMem<<< grid, threads >>> (d_tdata, d_idata);
hipLaunchKernelGGL(( sum_kernel_sharedMem), dim3(grid), dim3(threads), threads*sizeof(float) , 0, d_tdata, d_idata);
threads =grid;
grid=1;
//sum_kernel_globalMem<<< grid, threads >>> (d_odata, d_tdata);
hipLaunchKernelGGL(( sum_kernel_sharedMem), dim3(grid), dim3(threads),threads*sizeof(float) , 0, d_odata, d_tdata);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("\nAverage time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
// check results
// copy result from device to host
cutilSafeCall(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements, hipMemcpyDeviceToHost));
printf("OUTPUT: ");
printf(" %f ", h_data[0]);
printf("\n");
printf("REFERENCE: ");
printf(" %f ", reference[0]);
printf("\n");
// custom output handling when no regression test running
// in this case check if the result is equivalent to the expected solution
// Due to the large number of additions, a non-zero epsilon is necessary to
// mask floating point precision errors.
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, 1, epsilon);
printf( "sum: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
}
| 24d233ab58930622cf94c9f8a4cbfc55d7796275.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#define NUM_BANKS 16
#define N_ELEMENTS 16384
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
__global__ void sum_kernel(float *g_odata, float *g_idata, int n)
{
int i;
int tid = threadIdx.x; //Calculate a thread ID based on this thread's position within the block
//int tid = threadIdx.y * blockDim.x + threadIdx.x; //Another thread ID example for a 2-D thread block
//int tid = blockIdx.x * blockDim.x + threadIdx.x; //Another thread ID example for assigning unique thread IDs across
//different blocks
g_odata[0] = 0;
//A single thread adds up all 1M array elements serially.
//This is a poor use of parallel hardware - your job is to increase the number of threads, split up the work, and communicate
//data between threads as necessary to improve the kernel performance.
for(i = 0;i < N_ELEMENTS;i++)
{
g_odata[0] += g_idata[i];
}
__syncthreads(); //Syncthreads forces all threads within a block to reach this point before continuing past. Note this is
//necessary within blocks because not all threads can physically execute at the same time.
//Syncthreads does NOT synchronize different blocks (but you should not need to for this project).
}
__global__ void sum_kernel_globalMem(float *g_odata, float *g_idata)
{
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if(tid<s){
g_idata[myId] += g_idata[myId + s];
}
__syncthreads();
}
if(tid==0){
g_odata[blockIdx.x]=g_idata[myId];
}
}
__global__ void sum_kernel_sharedMem(float *g_odata, float*g_idata)
{
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
sdata[tid] = g_idata[myId];
__syncthreads();
for(unsigned int s= blockDim.x/2; s>0;s>>=1){
if(tid<s){
sdata[tid] +=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
g_odata[blockIdx.x]=sdata[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data, const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a sum (reduction) test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, char** argv)
{
cudaSetDevice( cutGetMaxGflopsDeviceId() );
int num_elements = N_ELEMENTS;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
//h_data[i] = 1;
}
printf("\n");
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
float* d_tdata;//intermediate data
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_tdata, mem_size));
// copy host memory to device input array
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("Kernel execution failed");
printf("Running sum of %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 1;
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
//sum_kernel<<< grid, threads >>> (d_odata, d_idata, num_elements);
//dim3 threads(maxThrdperBlck, 1, 1);
//dim3 grid(N_ELEMENTS / threads.x, 1, 1);
int threads = 128;
int grid = num_elements/threads;
//sum_kernel_globalMem<<< grid, threads >>> (d_tdata, d_idata);
sum_kernel_sharedMem<<< grid, threads, threads*sizeof(float) >>> (d_tdata, d_idata);
threads =grid;
grid=1;
//sum_kernel_globalMem<<< grid, threads >>> (d_odata, d_tdata);
sum_kernel_sharedMem<<< grid, threads,threads*sizeof(float) >>> (d_odata, d_tdata);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("\nAverage time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
// check results
// copy result from device to host
cutilSafeCall(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost));
printf("OUTPUT: ");
printf(" %f ", h_data[0]);
printf("\n");
printf("REFERENCE: ");
printf(" %f ", reference[0]);
printf("\n");
// custom output handling when no regression test running
// in this case check if the result is equivalent to the expected solution
// Due to the large number of additions, a non-zero epsilon is necessary to
// mask floating point precision errors.
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, 1, epsilon);
printf( "sum: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
}
|
a9b166d25cc91e91f81d9c19279887187d5812f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "assignment.h"
#include <cstring>
#include <cstdlib>
#include <stdio.h>
// Math Problem:
// output[i] = ((input[i - 1] + input[i]) * input[i + 1]) >> const_shift_val
// boundaries wrap around: 0 - 1 -> access memory input[N - 1]
// N-1 + 1 -> access memory input[0]
// This math problem does not have any special meaning. I went for something interesting.
// For simplicity in the shared memory kernels, there must be an exact fit of blockSize in the array:
// arraySize % blockSize == 0
// Debugging Size
//#define arraySize 10
//#define blockSize 10
#define arraySize 15000
#define blockSize 500
#define ITERATIONS 1
// Max Size test
//#define blockSize 256
// vocareum tests at blockSize * 50000
//#define arraySize (blockSize * 250000)
//#define ITERATIONS 1
// Max Iterations & large size
//#define blockSize 256
//#define arraySize (blockSize * 25000)
//#define ITERATIONS 5000
const int numBlocks = arraySize / blockSize;
static_assert(arraySize % blockSize == 0, "This program only supports array sizes that fit the block size exactly.");
// Use static global memory variables to learn how to use them (I would use hipMalloc otherwise)
__device__ static int gmem_input[arraySize];
__device__ static int gmem_output[arraySize];
__device__ static int gmem_shift_value;
// const memory is limited. Compile this only if testing a size that
// cuda will allow us to allocate.
#define MAX_CONST_ARRAY_SIZE 15000
#if arraySize <= MAX_CONST_ARRAY_SIZE
__constant__ int const_input[arraySize];
#endif
__constant__ int const_shift_value;
__constant__ int const_value_1;
__constant__ int const_value_2;
__constant__ int const_value_3;
const int shift_value_for_const_test = 3;
const int value1_for_const_test = 208;
const int value2_for_const_test = 517;
const int value3_for_const_test = 28;
// Host buffers
static int host_input[arraySize];
static int host_output[arraySize];
enum TestKernelType {
GLOBAL_MEM, SHARED_MEM, CONST_MEM,
GLOBAL_MEM_WITH_PARAM, SHARED_MEM_WITH_PARAM, CONST_MEM_ARRAY
};
#pragma region CUDA Kernels
__device__ void kernelMathFunctionGlobalMemory(const int constant_value)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
int upperIndex = (i + 1) % arraySize;
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = gmem_input[lowerIndex] + gmem_input[i];
value *= gmem_input[upperIndex];
value >>= constant_value;
}
gmem_output[i] = value;
}
// constant value will be passed as a function parameter
__global__ void globalMemoryKernelWithConstantParameter(const int constant_value)
{
kernelMathFunctionGlobalMemory(constant_value);
}
// constant_value will reside in global memory
__global__ void globalMemoryKernel()
{
kernelMathFunctionGlobalMemory(gmem_shift_value);
}
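// The two wrappers above differ only in how the shift amount reaches the math
// routine: globalMemoryKernelWithConstantParameter takes it as a kernel argument,
// while globalMemoryKernel loads it from the __device__ variable gmem_shift_value.
// The host-side timing in testKernelRun() compares these two paths.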
__device__ void kernelMathFunctionSharedMemory(int* shared_memory, const int constant_value)
{
// shared index cannot accept a negative index for thread 0. To allow for this, increase the pointer by 1 location
// for all threads
shared_memory = shared_memory + 1;
const int sharedMemoryIndex = threadIdx.x;
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int threadIndex = threadIdx.x;
// load global memory into shared memory. Account for i - 1 < 0
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
shared_memory[threadIndex - 1] = gmem_input[lowerIndex];
// load the last values in the block. The last thread's [i] value and [i + 1] value
if (threadIndex + 2 >= blockDim.x) {
// load i + 1. Account for i + 1 == arraySize
int upperIndex = (i + 1) % arraySize;
shared_memory[threadIndex + 1] = gmem_input[upperIndex];
}
__syncthreads();
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = shared_memory[sharedMemoryIndex - 1] + shared_memory[sharedMemoryIndex];
value *= shared_memory[sharedMemoryIndex + 1];
value >>= constant_value;
}
gmem_output[i] = value;
}
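// Shared-memory layout note: each caller provides blockSize + 2 ints. Shifting the
// pointer by one makes logical index -1 valid, thread t stores input[i-1] at
// [t-1], and the last two threads of the block also fill [t+1], so after the
// barrier every thread can read its [tid-1], [tid], [tid+1] neighborhood.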
// constant value will be passed as a function parameter
__global__ void sharedMemoryKernelWithConstantParameter(const int constant_value)
{
// extra shared memory: index -1, index for last thread + 1
__shared__ int shared_memory[blockSize + 2];
kernelMathFunctionSharedMemory(shared_memory, constant_value);
}
// constant_value will reside in global memory
__global__ void sharedMemoryKernel()
{
// extra shared memory: index -1, index for last thread + 1
__shared__ int shared_memory[blockSize + 2];
// load global constant into shared memory
if (threadIdx.x == 0) {
shared_memory[0] = gmem_shift_value;
}
__syncthreads();
// load shared memory into local memory, sync threads before overwriting index 0
const int local_shift_value = shared_memory[0];
__syncthreads();
kernelMathFunctionSharedMemory(shared_memory, local_shift_value);
}
__global__ void constMemoryKernel() {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = const_value_1 + const_value_2;
value *= const_value_3;
value >>= const_shift_value;
}
gmem_output[i] = value;
}
__global__ void constMemoryKernelReadFromArray()
{
#if arraySize <= MAX_CONST_ARRAY_SIZE
int i = blockIdx.x * blockDim.x + threadIdx.x;
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
int upperIndex = (i + 1) % arraySize;
#
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = const_input[lowerIndex] + const_input[i];
value *= const_input[upperIndex];
value >>= const_shift_value;
}
gmem_output[i] = value;
#endif
}
#pragma endregion
void populateTestData() {
for (int i = 0; i < arraySize; i++) {
host_input[i] = i + 1; // rand() %
}
// send input buffer to device
gpuErrchk(hipMemcpyToSymbol(gmem_input, host_input, sizeof(gmem_input)));
}
void validateCorrectness(const int shiftValue, bool isConstMemory=false) {
gpuErrchk(hipMemcpyFromSymbol(host_output, gmem_output, sizeof(host_output)));
int expectedConstResult = value1_for_const_test + value2_for_const_test;
expectedConstResult *= value3_for_const_test;
expectedConstResult >>= shift_value_for_const_test;
for (int i = 0; i < arraySize; i++) {
int expectedAnswer;
if (isConstMemory) {
expectedAnswer = expectedConstResult;
}
else {
int lowerIndex = (i == 0 ? arraySize - 1 : i - 1);
int upperIndex = (i + 1) % arraySize;
expectedAnswer = ((host_input[lowerIndex] + host_input[i]) * (host_input[upperIndex]) >> shiftValue);
}
//printf("%3d: ((%d + %d) * %d) >> %d) = %d\n", i, host_input[lowerIndex], host_input[i], host_input[upperIndex], shiftValue, expectedAnswer);
if (host_output[i] != expectedAnswer) {
printf("%3d: Error! Expected: %3d Actual: %3d\n", i, expectedAnswer, host_output[i]);
}
}
}
void resetOutputBufferData() {
int* d_output = nullptr;
gpuErrchk(hipGetSymbolAddress((void**)&d_output, gmem_output));
// clear output buffers
gpuErrchk(hipMemset(d_output, 0, sizeof(gmem_output)));
memset(host_output, 0, sizeof(host_output));
}
void testKernelRun(TestKernelType kernelType, const int shiftValue, const char* description) {
hipMemcpyToSymbol(gmem_shift_value, &shiftValue, sizeof(shiftValue));
{
TimeCodeBlock kernelRunMeasurement(description);
switch (kernelType)
{
case GLOBAL_MEM:
hipLaunchKernelGGL(( globalMemoryKernel) , dim3(numBlocks), dim3(blockSize) , 0, 0, );
break;
case GLOBAL_MEM_WITH_PARAM:
hipLaunchKernelGGL(( globalMemoryKernelWithConstantParameter) , dim3(numBlocks), dim3(blockSize) , 0, 0, shiftValue);
break;
case SHARED_MEM:
hipLaunchKernelGGL(( sharedMemoryKernel) , dim3(numBlocks), dim3(blockSize) , 0, 0, );
break;
case SHARED_MEM_WITH_PARAM:
hipLaunchKernelGGL(( sharedMemoryKernelWithConstantParameter) , dim3(numBlocks), dim3(blockSize) , 0, 0, shiftValue);
break;
case CONST_MEM:
hipLaunchKernelGGL(( constMemoryKernel), dim3(numBlocks), dim3(blockSize) , 0, 0, );
break;
case CONST_MEM_ARRAY:
hipLaunchKernelGGL(( constMemoryKernelReadFromArray), dim3(numBlocks), dim3(blockSize), 0, 0, );
break;
default:
break;
}
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());
}
bool validateForConstMemory = kernelType == TestKernelType::CONST_MEM;
validateCorrectness(shiftValue, validateForConstMemory);
resetOutputBufferData();
}
void testKernelsLoadingShiftFromGlobalMemory(const int shiftValue)
{
printf("Test loading the shift value from global memory\n\n");
// for (const auto& testType : {TestKernelType::GLOBAL_MEM, TestKernelType::SHARED_MEM, TestKernelType::CONST_MEM})
}
void testKernels() {
printf("Arraysize: %d Blocksize: %d Iterations: %d\n", arraySize, blockSize, ITERATIONS);
populateTestData();
resetOutputBufferData();
const int shiftValue = 3;
printf("--------------- GLOBAL MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::GLOBAL_MEM, shiftValue, "Global Memory Kernel, Global Memory Shift Value");
testKernelRun(TestKernelType::GLOBAL_MEM_WITH_PARAM, shiftValue, "Global Memory Kernel, Shift Value as Parameter");
printf("\n--------------- SHARED MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::SHARED_MEM, shiftValue, "Shared Memory Kernel, Global Memory Shift Value");
testKernelRun(TestKernelType::SHARED_MEM_WITH_PARAM, shiftValue, "Shared Memory Kernel, Shift Value as Parameter");
hipMemcpyToSymbol(const_shift_value, &shift_value_for_const_test, sizeof(shiftValue));
hipMemcpyToSymbol(const_value_1, &value1_for_const_test, sizeof(shiftValue));
hipMemcpyToSymbol(const_value_2, &value2_for_const_test, sizeof(shiftValue));
hipMemcpyToSymbol(const_value_3, &value3_for_const_test, sizeof(shiftValue));
printf("\n--------------- CONST MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::CONST_MEM, shiftValue, "Constant Memory Kernel");
#if arraySize <= MAX_CONST_ARRAY_SIZE
// const memory is limited
gpuErrchk(hipMemcpyToSymbol(const_input, host_input, sizeof(const_input)));
testKernelRun(TestKernelType::CONST_MEM_ARRAY, shiftValue, "Constant Memory Kernel, Read Const Memory Array");
#endif
}
int main(int argc, char* argv[])
{
testKernels();
return 0;
}
| a9b166d25cc91e91f81d9c19279887187d5812f5.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "assignment.h"
#include <cstring>
#include <cstdlib>
#include <stdio.h>
// Math Problem:
// output[i] = ((input[i - 1] + input[i]) * input[i + 1]) >> const_shift_val
// boundaries wrap around: 0 - 1 -> access memory input[N - 1]
// N-1 + 1 -> access memory input[0]
// This math problem does not have any special meaning. I went for something interesting.
// For simplicity in the shared memory kernels, there must be an exact fit of blockSize in the array:
// arraySize % blockSize == 0
// Debugging Size
//#define arraySize 10
//#define blockSize 10
#define arraySize 15000
#define blockSize 500
#define ITERATIONS 1
// Max Size test
//#define blockSize 256
// vocareum tests at blockSize * 50000
//#define arraySize (blockSize * 250000)
//#define ITERATIONS 1
// Max Iterations & large size
//#define blockSize 256
//#define arraySize (blockSize * 25000)
//#define ITERATIONS 5000
const int numBlocks = arraySize / blockSize;
static_assert(arraySize % blockSize == 0, "This program only supports array sizes that fit the block size exactly.");
// Use static global memory variables to learn how to use them (I would use cudaMalloc otherwise)
__device__ static int gmem_input[arraySize];
__device__ static int gmem_output[arraySize];
__device__ static int gmem_shift_value;
// const memory is limited. Compile this only if testing a size that
// cuda will allow us to allocate.
#define MAX_CONST_ARRAY_SIZE 15000
#if arraySize <= MAX_CONST_ARRAY_SIZE
__constant__ int const_input[arraySize];
#endif
__constant__ int const_shift_value;
__constant__ int const_value_1;
__constant__ int const_value_2;
__constant__ int const_value_3;
const int shift_value_for_const_test = 3;
const int value1_for_const_test = 208;
const int value2_for_const_test = 517;
const int value3_for_const_test = 28;
// Host buffers
static int host_input[arraySize];
static int host_output[arraySize];
enum TestKernelType {
GLOBAL_MEM, SHARED_MEM, CONST_MEM,
GLOBAL_MEM_WITH_PARAM, SHARED_MEM_WITH_PARAM, CONST_MEM_ARRAY
};
#pragma region CUDA Kernels
__device__ void kernelMathFunctionGlobalMemory(const int constant_value)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
int upperIndex = (i + 1) % arraySize;
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = gmem_input[lowerIndex] + gmem_input[i];
value *= gmem_input[upperIndex];
value >>= constant_value;
}
gmem_output[i] = value;
}
// constant value will be passed as a function parameter
__global__ void globalMemoryKernelWithConstantParameter(const int constant_value)
{
kernelMathFunctionGlobalMemory(constant_value);
}
// constant_value will reside in global memory
__global__ void globalMemoryKernel()
{
kernelMathFunctionGlobalMemory(gmem_shift_value);
}
__device__ void kernelMathFunctionSharedMemory(int* shared_memory, const int constant_value)
{
// shared index cannot accept a negative index for thread 0. To allow for this, increase the pointer by 1 location
// for all threads
shared_memory = shared_memory + 1;
const int sharedMemoryIndex = threadIdx.x;
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int threadIndex = threadIdx.x;
// load global memory into shared memory. Account for i - 1 < 0
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
shared_memory[threadIndex - 1] = gmem_input[lowerIndex];
// load the last values in the block. The last thread's [i] value and [i + 1] value
if (threadIndex + 2 >= blockDim.x) {
// load i + 1. Account for i + 1 == arraySize
int upperIndex = (i + 1) % arraySize;
shared_memory[threadIndex + 1] = gmem_input[upperIndex];
}
__syncthreads();
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = shared_memory[sharedMemoryIndex - 1] + shared_memory[sharedMemoryIndex];
value *= shared_memory[sharedMemoryIndex + 1];
value >>= constant_value;
}
gmem_output[i] = value;
}
// constant value will be passed as a function parameter
__global__ void sharedMemoryKernelWithConstantParameter(const int constant_value)
{
// extra shared memory: index -1, index for last thread + 1
__shared__ int shared_memory[blockSize + 2];
kernelMathFunctionSharedMemory(shared_memory, constant_value);
}
// constant_value will reside in global memory
__global__ void sharedMemoryKernel()
{
// extra shared memory: index -1, index for last thread + 1
__shared__ int shared_memory[blockSize + 2];
// load global constant into shared memory
if (threadIdx.x == 0) {
shared_memory[0] = gmem_shift_value;
}
__syncthreads();
// load shared memory into local memory, sync threads before overwriting index 0
const int local_shift_value = shared_memory[0];
__syncthreads();
kernelMathFunctionSharedMemory(shared_memory, local_shift_value);
}
__global__ void constMemoryKernel() {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = const_value_1 + const_value_2;
value *= const_value_3;
value >>= const_shift_value;
}
gmem_output[i] = value;
}
__global__ void constMemoryKernelReadFromArray()
{
#if arraySize <= MAX_CONST_ARRAY_SIZE
int i = blockIdx.x * blockDim.x + threadIdx.x;
int lowerIndex = i - 1;
if (lowerIndex < 0)
lowerIndex = arraySize - 1;
int upperIndex = (i + 1) % arraySize;
#
// run multiple iterations simply to stress the memory. Calculation is the same as 1 iteration
int value = 0;
for (int count = 0; count < ITERATIONS; count++) {
value = const_input[lowerIndex] + const_input[i];
value *= const_input[upperIndex];
value >>= const_shift_value;
}
gmem_output[i] = value;
#endif
}
#pragma endregion
void populateTestData() {
for (int i = 0; i < arraySize; i++) {
host_input[i] = i + 1; // rand() %
}
// send input buffer to device
gpuErrchk(cudaMemcpyToSymbol(gmem_input, host_input, sizeof(gmem_input)));
}
void validateCorrectness(const int shiftValue, bool isConstMemory=false) {
gpuErrchk(cudaMemcpyFromSymbol(host_output, gmem_output, sizeof(host_output)));
int expectedConstResult = value1_for_const_test + value2_for_const_test;
expectedConstResult *= value3_for_const_test;
expectedConstResult >>= shift_value_for_const_test;
for (int i = 0; i < arraySize; i++) {
int expectedAnswer;
if (isConstMemory) {
expectedAnswer = expectedConstResult;
}
else {
int lowerIndex = (i == 0 ? arraySize - 1 : i - 1);
int upperIndex = (i + 1) % arraySize;
expectedAnswer = ((host_input[lowerIndex] + host_input[i]) * (host_input[upperIndex]) >> shiftValue);
}
//printf("%3d: ((%d + %d) * %d) >> %d) = %d\n", i, host_input[lowerIndex], host_input[i], host_input[upperIndex], shiftValue, expectedAnswer);
if (host_output[i] != expectedAnswer) {
printf("%3d: Error! Expected: %3d Actual: %3d\n", i, expectedAnswer, host_output[i]);
}
}
}
void resetOutputBufferData() {
int* d_output = nullptr;
gpuErrchk(cudaGetSymbolAddress((void**)&d_output, gmem_output));
// clear output buffers
gpuErrchk(cudaMemset(d_output, 0, sizeof(gmem_output)));
memset(host_output, 0, sizeof(host_output));
}
void testKernelRun(TestKernelType kernelType, const int shiftValue, const char* description) {
cudaMemcpyToSymbol(gmem_shift_value, &shiftValue, sizeof(shiftValue));
{
TimeCodeBlock kernelRunMeasurement(description);
switch (kernelType)
{
case GLOBAL_MEM:
globalMemoryKernel <<<numBlocks, blockSize >>> ();
break;
case GLOBAL_MEM_WITH_PARAM:
globalMemoryKernelWithConstantParameter <<<numBlocks, blockSize >>> (shiftValue);
break;
case SHARED_MEM:
sharedMemoryKernel <<<numBlocks, blockSize >>>();
break;
case SHARED_MEM_WITH_PARAM:
sharedMemoryKernelWithConstantParameter <<<numBlocks, blockSize >>> (shiftValue);
break;
case CONST_MEM:
constMemoryKernel<<<numBlocks, blockSize >>>();
break;
case CONST_MEM_ARRAY:
constMemoryKernelReadFromArray<<<numBlocks, blockSize>>>();
break;
default:
break;
}
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaDeviceSynchronize());
}
bool validateForConstMemory = kernelType == TestKernelType::CONST_MEM;
validateCorrectness(shiftValue, validateForConstMemory);
resetOutputBufferData();
}
void testKernelsLoadingShiftFromGlobalMemory(const int shiftValue)
{
printf("Test loading the shift value from global memory\n\n");
// for (const auto& testType : {TestKernelType::GLOBAL_MEM, TestKernelType::SHARED_MEM, TestKernelType::CONST_MEM})
}
void testKernels() {
printf("Arraysize: %d Blocksize: %d Iterations: %d\n", arraySize, blockSize, ITERATIONS);
populateTestData();
resetOutputBufferData();
const int shiftValue = 3;
printf("--------------- GLOBAL MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::GLOBAL_MEM, shiftValue, "Global Memory Kernel, Global Memory Shift Value");
testKernelRun(TestKernelType::GLOBAL_MEM_WITH_PARAM, shiftValue, "Global Memory Kernel, Shift Value as Parameter");
printf("\n--------------- SHARED MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::SHARED_MEM, shiftValue, "Shared Memory Kernel, Global Memory Shift Value");
testKernelRun(TestKernelType::SHARED_MEM_WITH_PARAM, shiftValue, "Shared Memory Kernel, Shift Value as Parameter");
cudaMemcpyToSymbol(const_shift_value, &shift_value_for_const_test, sizeof(shiftValue));
cudaMemcpyToSymbol(const_value_1, &value1_for_const_test, sizeof(shiftValue));
cudaMemcpyToSymbol(const_value_2, &value2_for_const_test, sizeof(shiftValue));
cudaMemcpyToSymbol(const_value_3, &value3_for_const_test, sizeof(shiftValue));
printf("\n--------------- CONST MEMORY TESTS -------------------------\n");
testKernelRun(TestKernelType::CONST_MEM, shiftValue, "Constant Memory Kernel");
#if arraySize <= MAX_CONST_ARRAY_SIZE
// const memory is limited
gpuErrchk(cudaMemcpyToSymbol(const_input, host_input, sizeof(const_input)));
testKernelRun(TestKernelType::CONST_MEM_ARRAY, shiftValue, "Constant Memory Kernel, Read Const Memory Array");
#endif
}
int main(int argc, char* argv[])
{
testKernels();
return 0;
}
|
325695981730aeeab7081d4e8400cb57bc37dd9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/local_share/im2col.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./im2col.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <typename T>
__global__ void local_share_im2col(const T* __restrict__ img,
T* __restrict__ col, int fh, int fw, int sh,
int sw, int nr_groups, Param param) {
const int in_ch_idx = threadIdx.x + blockIdx.y * blockDim.x;
const int batch = threadIdx.y + blockIdx.z * blockDim.y;
if (in_ch_idx >= param.ci || batch >= param.n)
return;
const int hw = blockIdx.x;
const int wo = param.grp_wo * param.sgw;
const int oh_idx = hw / wo;
const int ow_idx = hw - oh_idx * wo;
const int sgh_idx = oh_idx / param.grp_ho;
const int sgw_idx = ow_idx / param.grp_wo;
const int grp_oh_idx = oh_idx - sgh_idx * param.grp_ho;
const int grp_ow_idx = ow_idx - sgw_idx * param.grp_wo;
const int grp_sizes = param.grp_ho * param.grp_wo;
const int icpg = param.ci / nr_groups;
const int ch_grp_idx = in_ch_idx / icpg;
const int grp_ch_idx = in_ch_idx - icpg * ch_grp_idx;
const T* __restrict__ img_ptr = img +
batch * param.ci * param.hi * param.wi +
in_ch_idx * param.hi * param.wi;
const int ld = icpg * fh * fw;
T* __restrict__ col_ptr =
col +
ch_grp_idx * (param.sgh * param.sgw) * param.n * grp_sizes *
ld // channel group stride
+ (sgh_idx * param.sgw + sgw_idx) * param.n * grp_sizes *
ld // batch stride
+ grp_ch_idx * fh * fw // input channel stride
+ (batch * grp_sizes + (grp_oh_idx * param.grp_wo + grp_ow_idx)) *
ld; // row stride
for (int kh = 0; kh < fh; kh++) {
for (int kw = 0; kw < fw; kw++) {
int ih_idx = oh_idx * sh - param.ph + kh;
int iw_idx = ow_idx * sw - param.pw + kw;
float val = 0.f;
if (ih_idx < param.hi && ih_idx >= 0 && iw_idx < param.wi &&
iw_idx >= 0) {
val = img_ptr[ih_idx * param.wi + iw_idx];
}
*(col_ptr++) = val;
}
}
}
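// Layout note: for each (channel group, spatial group) pair the kernel emits an
// (n * grp_ho * grp_wo) x (icpg * fh * fw) matrix, where the row encodes
// (batch, output position inside the group) and the column encodes
// (input channel inside the group, kh, kw). Taps outside the padded input are 0.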
template <typename T>
__global__ void local_share_col2im(const T* __restrict__ col,
T* __restrict__ img, int fh, int fw, int sh,
int sw, int nr_groups, Param param) {
const int batch = threadIdx.x + blockIdx.y * blockDim.x;
const int in_ch_idx = threadIdx.y + blockIdx.z * blockDim.y;
if (in_ch_idx >= param.ci || batch >= param.n)
return;
const int hw = blockIdx.x;
const int ih_idx = hw / param.wi;
const int iw_idx = hw - ih_idx * param.wi;
const int ho = param.grp_ho * param.sgh;
const int wo = param.grp_wo * param.sgw;
const int icpg = param.ci / nr_groups;
const int grp_sizes = param.grp_ho * param.grp_wo;
const int filter_sizes = fh * fw;
const int ch_filter_sizes = icpg * filter_sizes;
const int nr_elems_per_grp = param.n * grp_sizes * ch_filter_sizes;
const int ch_grp_idx = in_ch_idx / icpg;
const int grp_ch_idx = in_ch_idx - icpg * ch_grp_idx;
const T* __restrict__ col_ptr =
col +
ch_grp_idx * param.sgh * param.sgw * ch_filter_sizes * grp_sizes *
param.n // channel group stride
+ batch // batch stride
+
grp_ch_idx * filter_sizes * grp_sizes * param.n; // channel stride
T res(0);
for (int kh = 0; kh < fh; ++kh) {
uint32_t anchorh = ih_idx + param.ph - kh;
if (anchorh < ho * sh && anchorh % sh == 0) {
int oh_idx = anchorh / sh;
int sgh_idx = oh_idx / param.grp_ho;
int grp_oh_idx = oh_idx - sgh_idx * param.grp_ho;
for (int kw = 0; kw < fw; ++kw) {
uint32_t anchorw = iw_idx + param.pw - kw;
if (anchorw < wo * sw && anchorw % sw == 0) {
int ow_idx = anchorw / sw;
int sgw_idx = ow_idx / param.grp_wo;
int grp_ow_idx = ow_idx - sgw_idx * param.grp_wo;
const T* __restrict__ sptr =
col_ptr +
(sgh_idx * param.sgw + sgw_idx) *
nr_elems_per_grp // spatial group stride
+ (grp_oh_idx * param.grp_wo + grp_ow_idx) *
param.n // spatial stride
+ (kh * fw + kw) * grp_sizes * param.n;
res += sptr[0];
}
}
}
}
img[batch * param.ci * param.hi * param.wi +
in_ch_idx * param.hi * param.wi + ih_idx * param.wi + iw_idx] = res;
}
} // namespace
void megdnn::cuda::local_share::_do_local_share_im2col(
const float* d_im, float* d_col, int fh, int fw, int sh, int sw,
int nr_groups, const Param& param, hipStream_t stream) {
void (*kern)(const float* __restrict__, float* __restrict__, int, int, int,
int, int, Param);
kern = local_share_im2col<float>;
constexpr int threads_x = 256;
uint32_t nr_threads =
_get_kern_block_size(reinterpret_cast<const void*>(kern));
uint32_t nr_threads_x = ::min(threads_x, param.ci);
uint32_t nr_threads_y =
::min(static_cast<int>(nr_threads / nr_threads_x), param.n);
uint32_t nr_blocks_x = param.sgw * param.sgh * param.grp_ho * param.grp_wo,
nr_blocks_y = DIVUP(param.ci, nr_threads_x),
nr_blocks_z = DIVUP(param.n, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
hipLaunchKernelGGL(( kern), dim3(blocks), dim3(threads), 0, stream, d_im, d_col, fh, fw, sh, sw, nr_groups,
param);
after_kernel_launch();
}
void megdnn::cuda::local_share::_do_local_share_col2im(
const float* d_col, float* d_im, int fh, int fw, int sh, int sw,
int nr_groups, const Param& param, hipStream_t stream) {
void (*kern)(const float* __restrict__, float* __restrict__, int, int, int,
int, int, Param);
kern = local_share_col2im<float>;
constexpr int threads_x = 256;
uint32_t nr_threads =
_get_kern_block_size(reinterpret_cast<const void*>(kern));
uint32_t nr_threads_x = ::min(threads_x, param.n);
uint32_t nr_threads_y =
::min(static_cast<int>(nr_threads / nr_threads_x), param.ci);
uint32_t nr_blocks_x = param.hi * param.wi,
nr_blocks_y = DIVUP(param.n, nr_threads_x),
nr_blocks_z = DIVUP(param.ci, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
hipLaunchKernelGGL(( kern), dim3(blocks), dim3(threads), 0, stream, d_col, d_im, fh, fw, sh, sw, nr_groups,
param);
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
| 325695981730aeeab7081d4e8400cb57bc37dd9b.cu | /**
* \file dnn/src/cuda/local_share/im2col.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./im2col.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <typename T>
__global__ void local_share_im2col(const T* __restrict__ img,
T* __restrict__ col, int fh, int fw, int sh,
int sw, int nr_groups, Param param) {
const int in_ch_idx = threadIdx.x + blockIdx.y * blockDim.x;
const int batch = threadIdx.y + blockIdx.z * blockDim.y;
if (in_ch_idx >= param.ci || batch >= param.n)
return;
const int hw = blockIdx.x;
const int wo = param.grp_wo * param.sgw;
const int oh_idx = hw / wo;
const int ow_idx = hw - oh_idx * wo;
const int sgh_idx = oh_idx / param.grp_ho;
const int sgw_idx = ow_idx / param.grp_wo;
const int grp_oh_idx = oh_idx - sgh_idx * param.grp_ho;
const int grp_ow_idx = ow_idx - sgw_idx * param.grp_wo;
const int grp_sizes = param.grp_ho * param.grp_wo;
const int icpg = param.ci / nr_groups;
const int ch_grp_idx = in_ch_idx / icpg;
const int grp_ch_idx = in_ch_idx - icpg * ch_grp_idx;
const T* __restrict__ img_ptr = img +
batch * param.ci * param.hi * param.wi +
in_ch_idx * param.hi * param.wi;
const int ld = icpg * fh * fw;
T* __restrict__ col_ptr =
col +
ch_grp_idx * (param.sgh * param.sgw) * param.n * grp_sizes *
ld // channel group stride
+ (sgh_idx * param.sgw + sgw_idx) * param.n * grp_sizes *
ld // batch stride
+ grp_ch_idx * fh * fw // input channel stride
+ (batch * grp_sizes + (grp_oh_idx * param.grp_wo + grp_ow_idx)) *
ld; // row stride
for (int kh = 0; kh < fh; kh++) {
for (int kw = 0; kw < fw; kw++) {
int ih_idx = oh_idx * sh - param.ph + kh;
int iw_idx = ow_idx * sw - param.pw + kw;
float val = 0.f;
if (ih_idx < param.hi && ih_idx >= 0 && iw_idx < param.wi &&
iw_idx >= 0) {
val = img_ptr[ih_idx * param.wi + iw_idx];
}
*(col_ptr++) = val;
}
}
}
template <typename T>
__global__ void local_share_col2im(const T* __restrict__ col,
T* __restrict__ img, int fh, int fw, int sh,
int sw, int nr_groups, Param param) {
const int batch = threadIdx.x + blockIdx.y * blockDim.x;
const int in_ch_idx = threadIdx.y + blockIdx.z * blockDim.y;
if (in_ch_idx >= param.ci || batch >= param.n)
return;
const int hw = blockIdx.x;
const int ih_idx = hw / param.wi;
const int iw_idx = hw - ih_idx * param.wi;
const int ho = param.grp_ho * param.sgh;
const int wo = param.grp_wo * param.sgw;
const int icpg = param.ci / nr_groups;
const int grp_sizes = param.grp_ho * param.grp_wo;
const int filter_sizes = fh * fw;
const int ch_filter_sizes = icpg * filter_sizes;
const int nr_elems_per_grp = param.n * grp_sizes * ch_filter_sizes;
const int ch_grp_idx = in_ch_idx / icpg;
const int grp_ch_idx = in_ch_idx - icpg * ch_grp_idx;
const T* __restrict__ col_ptr =
col +
ch_grp_idx * param.sgh * param.sgw * ch_filter_sizes * grp_sizes *
param.n // channel group stride
+ batch // batch stride
+
grp_ch_idx * filter_sizes * grp_sizes * param.n; // channel stride
T res(0);
for (int kh = 0; kh < fh; ++kh) {
uint32_t anchorh = ih_idx + param.ph - kh;
if (anchorh < ho * sh && anchorh % sh == 0) {
int oh_idx = anchorh / sh;
int sgh_idx = oh_idx / param.grp_ho;
int grp_oh_idx = oh_idx - sgh_idx * param.grp_ho;
for (int kw = 0; kw < fw; ++kw) {
uint32_t anchorw = iw_idx + param.pw - kw;
if (anchorw < wo * sw && anchorw % sw == 0) {
int ow_idx = anchorw / sw;
int sgw_idx = ow_idx / param.grp_wo;
int grp_ow_idx = ow_idx - sgw_idx * param.grp_wo;
const T* __restrict__ sptr =
col_ptr +
(sgh_idx * param.sgw + sgw_idx) *
nr_elems_per_grp // spatial group stride
+ (grp_oh_idx * param.grp_wo + grp_ow_idx) *
param.n // spatial stride
+ (kh * fw + kw) * grp_sizes * param.n;
res += sptr[0];
}
}
}
}
img[batch * param.ci * param.hi * param.wi +
in_ch_idx * param.hi * param.wi + ih_idx * param.wi + iw_idx] = res;
}
} // namespace
void megdnn::cuda::local_share::_do_local_share_im2col(
const float* d_im, float* d_col, int fh, int fw, int sh, int sw,
int nr_groups, const Param& param, cudaStream_t stream) {
void (*kern)(const float* __restrict__, float* __restrict__, int, int, int,
int, int, Param);
kern = local_share_im2col<float>;
constexpr int threads_x = 256;
uint32_t nr_threads =
_get_kern_block_size(reinterpret_cast<const void*>(kern));
uint32_t nr_threads_x = std::min(threads_x, param.ci);
uint32_t nr_threads_y =
std::min(static_cast<int>(nr_threads / nr_threads_x), param.n);
uint32_t nr_blocks_x = param.sgw * param.sgh * param.grp_ho * param.grp_wo,
nr_blocks_y = DIVUP(param.ci, nr_threads_x),
nr_blocks_z = DIVUP(param.n, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
kern<<<blocks, threads, 0, stream>>>(d_im, d_col, fh, fw, sh, sw, nr_groups,
param);
after_kernel_launch();
}
void megdnn::cuda::local_share::_do_local_share_col2im(
const float* d_col, float* d_im, int fh, int fw, int sh, int sw,
int nr_groups, const Param& param, cudaStream_t stream) {
void (*kern)(const float* __restrict__, float* __restrict__, int, int, int,
int, int, Param);
kern = local_share_col2im<float>;
constexpr int threads_x = 256;
uint32_t nr_threads =
_get_kern_block_size(reinterpret_cast<const void*>(kern));
uint32_t nr_threads_x = std::min(threads_x, param.n);
uint32_t nr_threads_y =
std::min(static_cast<int>(nr_threads / nr_threads_x), param.ci);
uint32_t nr_blocks_x = param.hi * param.wi,
nr_blocks_y = DIVUP(param.n, nr_threads_x),
nr_blocks_z = DIVUP(param.ci, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
kern<<<blocks, threads, 0, stream>>>(d_col, d_im, fh, fw, sh, sw, nr_groups,
param);
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
|
5a4d40919e094b2cbe393b4c5d4fcf0417bd081b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int warp_thread_id = threadIdx.x % 32;
__shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
__shared__ void **tmp_ptr;
__shared__ void *arr[SHARED_MEM_ELEMENTS];
if (threadIdx.x == 0) {
for (i=0; i < SHARED_MEM_ELEMENTS; i++) {
arr[i] = (void *)&sdata[i];
}
for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) {
sdata[i] = (unsigned long long)arr[i+1];
}
sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0];
}
__syncthreads();
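    // At this point sdata forms a circular singly-linked list: sdata[i] holds the
    // address of arr[i+1] (wrapping at the last element), so repeatedly dereferencing
    // *tmp_ptr below walks this shared-memory ring, one dependent load per step.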
tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
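/* The two loops above build pointer-chasing chains: h_a[i] stores the address of
   h_a[(i + 1 + stride) % N]. For example (assuming stride == 1, N == 4):
   h_a[0] -> &h_a[2], h_a[1] -> &h_a[3], h_a[2] -> &h_a[0], h_a[3] -> &h_a[1],
   i.e. every load depends on the previous one, stepping 1 + stride elements ahead. */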
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
// init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
// hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(reinterpret_cast<const void*>(shared_latency), hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
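// Note: the literal 10 passed as `iterations` below is effectively unused;
// parametric_measure_shared launches the kernel with the global num_iterations
// parsed from argv[3] above.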
parametric_measure_shared(N, 10, stride);
return 0;
}
| 5a4d40919e094b2cbe393b4c5d4fcf0417bd081b.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int warp_thread_id = threadIdx.x % 32;
__shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
__shared__ void **tmp_ptr;
__shared__ void *arr[SHARED_MEM_ELEMENTS];
if (threadIdx.x == 0) {
for (i=0; i < SHARED_MEM_ELEMENTS; i++) {
arr[i] = (void *)&sdata[i];
}
for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) {
sdata[i] = (unsigned long long)arr[i+1];
}
sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0];
}
__syncthreads();
tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
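    // Only the first `divergence` lanes of each warp run the pointer-chasing loop;
    // the remaining lanes idle, so this argument directly controls intra-warp divergence.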
if (warp_thread_id < divergence) {
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
// init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
// cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
ddf99d3c607b95355c1f2fce6767aeaecc1c9c86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/shuffle_channel_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
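// ShuffleChannel views the C channels of an NCHW tensor as a group_row x group_column
// grid and writes input channel (i, j) to output channel (j, i); len is the per-channel
// spatial size (H * W). It is launched over numel() elements with a grid-stride loop.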
template <typename T>
__global__ void ShuffleChannel(const int nthreads,
const int feature_map_size,
T* output,
const T* input,
int group_row,
int group_column,
int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t ii = index; ii < nthreads; ii += offset) {
    // Index with the grid-stride loop variable (ii), not the fixed thread index,
    // so each iteration maps to a distinct element when gridDim.x is capped.
    const int n = ii / group_row / group_column / len;
    const int i = (ii / group_column / len) % group_row;
    const int j = ii / len % group_column;
    const int k = ii - (n * feature_map_size + (i * group_column + j) * len);
    T* p_o = output + n * feature_map_size + (j * group_row + i) * len;
    p_o[k] = input[ii];
}
}
template <typename DeviceContext, typename T>
class ShuffleChannelOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<framework::Tensor>("X");
auto* output = ctx.Output<framework::Tensor>("Out");
int group = ctx.Attr<int>("group");
auto input_dims = input->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
// count is the product of NCHW same as numel()
int count = num * group_column * group_row * sp_sz;
int blocks = NumBlocks(output->numel());
int threads = kNumCUDAThreads;
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( ShuffleChannel<T>)
, dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(),
count,
feature_map_size,
output_data,
input_data,
group_row,
group_column,
sp_sz);
}
};
template <typename DeviceContext, typename T>
class ShuffleChannelGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* output_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<framework::Tensor>(framework::GradVarName("X"));
int group = ctx.Attr<int>("group");
const auto& input_dims = input_grad->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
const T* output_grad_data = output_grad->data<T>();
int blocks = NumBlocks(output_grad->numel());
int threads = kNumCUDAThreads;
int count = num * group_column * group_row * sp_sz;
hipLaunchKernelGGL(( ShuffleChannel<T>)
, dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(),
count,
feature_map_size,
input_grad_data,
output_grad_data,
group_row,
group_column,
sp_sz);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
shuffle_channel,
ops::ShuffleChannelOpCUDAKernel<phi::GPUContext, float>,
ops::ShuffleChannelOpCUDAKernel<phi::GPUContext, double>);
REGISTER_OP_CUDA_KERNEL(
shuffle_channel_grad,
ops::ShuffleChannelGradOpCUDAKernel<phi::GPUContext, float>,
ops::ShuffleChannelGradOpCUDAKernel<phi::GPUContext, double>);
| ddf99d3c607b95355c1f2fce6767aeaecc1c9c86.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/shuffle_channel_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void ShuffleChannel(const int nthreads,
const int feature_map_size,
T* output,
const T* input,
int group_row,
int group_column,
int len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t ii = index; ii < nthreads; ii += offset) {
    // Index with the grid-stride loop variable (ii), not the fixed thread index,
    // so each iteration maps to a distinct element when gridDim.x is capped.
    const int n = ii / group_row / group_column / len;
    const int i = (ii / group_column / len) % group_row;
    const int j = ii / len % group_column;
    const int k = ii - (n * feature_map_size + (i * group_column + j) * len);
    T* p_o = output + n * feature_map_size + (j * group_row + i) * len;
    p_o[k] = input[ii];
}
}
template <typename DeviceContext, typename T>
class ShuffleChannelOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<framework::Tensor>("X");
auto* output = ctx.Output<framework::Tensor>("Out");
int group = ctx.Attr<int>("group");
auto input_dims = input->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
// count is the product of NCHW same as numel()
int count = num * group_column * group_row * sp_sz;
int blocks = NumBlocks(output->numel());
int threads = kNumCUDAThreads;
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
ShuffleChannel<T>
<<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>(
count,
feature_map_size,
output_data,
input_data,
group_row,
group_column,
sp_sz);
}
};
template <typename DeviceContext, typename T>
class ShuffleChannelGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* output_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<framework::Tensor>(framework::GradVarName("X"));
int group = ctx.Attr<int>("group");
const auto& input_dims = input_grad->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
const T* output_grad_data = output_grad->data<T>();
int blocks = NumBlocks(output_grad->numel());
int threads = kNumCUDAThreads;
int count = num * group_column * group_row * sp_sz;
ShuffleChannel<T>
<<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>(
count,
feature_map_size,
input_grad_data,
output_grad_data,
group_row,
group_column,
sp_sz);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
shuffle_channel,
ops::ShuffleChannelOpCUDAKernel<phi::GPUContext, float>,
ops::ShuffleChannelOpCUDAKernel<phi::GPUContext, double>);
REGISTER_OP_CUDA_KERNEL(
shuffle_channel_grad,
ops::ShuffleChannelGradOpCUDAKernel<phi::GPUContext, float>,
ops::ShuffleChannelGradOpCUDAKernel<phi::GPUContext, double>);
|
3a14c3bfe509b6be2e7f1e9df8480ee4c3e9721f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.hpp"
#include "orc_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr unsigned int init_threads_per_group = 32;
constexpr unsigned int init_groups_per_block = 4;
constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block;
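// One thread block fills init_groups_per_block row-group descriptors of a single column:
// blockIdx.y picks the column, (blockIdx.x, threadIdx.y) picks the row group, and only
// lane 0 (t == 0) of each 32-thread group writes the statistics_group entry.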
__global__ void __launch_bounds__(init_threads_per_block)
gpu_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds)
{
__shared__ __align__(4) statistics_group group_g[init_groups_per_block];
uint32_t const col_id = blockIdx.y;
uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y;
uint32_t const t = threadIdx.x;
auto const num_rowgroups = rowgroup_bounds.size().first;
statistics_group* group = &group_g[threadIdx.y];
if (chunk_id < num_rowgroups and t == 0) {
group->col = &cols[col_id];
group->start_row = rowgroup_bounds[chunk_id][col_id].begin;
group->num_rows = rowgroup_bounds[chunk_id][col_id].size();
groups[col_id * num_rowgroups + chunk_id] = *group;
}
}
/**
* @brief Get the buffer size and offsets of encoded statistics
*
* @param[in,out] groups Statistics merge groups
* @param[in] statistics_count Number of statistics buffers
*/
constexpr unsigned int buffersize_reduction_dim = 32;
constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim;
constexpr unsigned int pb_fld_hdrlen = 1;
constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length
constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length
constexpr unsigned int pb_fldlen_int64 = 10;
constexpr unsigned int pb_fldlen_float64 = 8;
constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters
constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64;
constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64;
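// Single-block kernel: each thread takes one statistics chunk, derives a worst-case
// encoded length for its data type from the pb_fldlen_* constants above, and a
// block-wide exclusive prefix sum converts those lengths into byte offsets
// (start_chunk) inside the shared statistics blob; stats_size carries the running
// total across tiles of block_size chunks.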
template <unsigned int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpu_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
  using block_scan = hipcub::BlockScan<uint32_t, block_size, hipcub::BLOCK_SCAN_WARP_SCANS>;
__shared__ typename block_scan::TempStorage temp_storage;
volatile uint32_t stats_size = 0;
uint32_t t = threadIdx.x;
__syncthreads();
for (uint32_t start = 0; start < statistics_count; start += block_size) {
uint32_t stats_len = 0, stats_pos;
uint32_t idx = start + t;
if (idx < statistics_count) {
statistics_dtype const dtype = groups[idx].stats_dtype;
switch (dtype) {
case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_int64:
case dtype_timestamp64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64);
break;
case dtype_float32:
case dtype_float64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64);
break;
case dtype_decimal64:
case dtype_decimal128:
stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal);
break;
case dtype_string:
stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) +
chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length;
break;
case dtype_none: stats_len = pb_fldlen_common;
default: break;
}
}
uint32_t tmp_stats_size;
block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size);
stats_pos += stats_size;
stats_size += tmp_stats_size;
if (idx < statistics_count) {
groups[idx].start_chunk = stats_pos;
groups[idx].num_chunks = stats_len;
}
__syncthreads();
}
}
struct stats_state_s {
uint8_t* base; ///< Output buffer start
uint8_t* end; ///< Output buffer end
statistics_chunk chunk;
statistics_merge_group group;
statistics_dtype stats_dtype; //!< Statistics data type for this column
// ORC stats
uint64_t numberOfValues;
uint8_t hasNull;
};
/*
* Protobuf encoding - see
* https://developers.google.com/protocol-buffers/docs/encoding
*/
// Protobuf varint encoding for unsigned int
__device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v)
{
while (v > 0x7f) {
*p++ = ((uint32_t)v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
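// Example: 300 (0b1'0010'1100) is emitted as 0xAC 0x02 -- low 7 bits first,
// with the MSB of every byte except the last set as a continuation flag.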
// Protobuf field encoding for unsigned int
__device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v)
{
p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16
return pb_encode_uint(p + 1, v);
}
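// The key byte written above is (field_number << 3) | wire_type; id * 8 + VARINT is the
// same thing, and it only stays a single byte while id < 16 (the 127 one-byte varint limit).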
// Protobuf field encoding for signed int
__device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v)
{
int64_t s = (v < 0);
return pb_put_uint(p, id, (v ^ -s) * 2 + s);
}
// Protobuf field encoding for 'packed' unsigned int (single value)
__device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v)
{
uint8_t* p2 = pb_encode_uint(p + 2, v);
p[0] = id * 8 + ProtofType::FIXEDLEN;
p[1] = static_cast<uint8_t>(p2 - (p + 2));
return p2;
}
// Protobuf field encoding for binary/string
__device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len)
{
p[0] = id * 8 + ProtofType::FIXEDLEN;
p = pb_encode_uint(p + 1, len);
memcpy(p, bytes, len);
return p + len;
}
// Protobuf field encoding for 64-bit raw encoding (double)
__device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64)
{
p[0] = id * 8 + ProtofType::FIXED64;
memcpy(p + 1, raw64, 8);
return p + 9;
}
/**
* @brief Encode statistics in ORC protobuf format
*
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*
* ORC statistics format from https://orc.apache.org/specification/ORCv1/
*
* message ColumnStatistics {
* // the number of values
* optional uint64 numberOfValues = 1;
* // At most one of these has a value for any column
* optional IntegerStatistics intStatistics = 2;
* optional DoubleStatistics doubleStatistics = 3;
* optional StringStatistics stringStatistics = 4;
* optional BucketStatistics bucketStatistics = 5;
* optional DecimalStatistics decimalStatistics = 6;
* optional DateStatistics dateStatistics = 7;
* optional BinaryStatistics binaryStatistics = 8;
* optional TimestampStatistics timestampStatistics = 9;
* optional bool hasNull = 10;
* }
*/
constexpr unsigned int encode_threads_per_chunk = 32;
constexpr unsigned int encode_chunks_per_block = 4;
constexpr unsigned int encode_threads_per_block =
encode_threads_per_chunk * encode_chunks_per_block;
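// Each statistics chunk is serialized by a single thread (lane 0 of a 32-thread group,
// encode_chunks_per_block chunks per block). The switch below emits the per-type protobuf
// submessage into the blob_bfr slice reserved by gpu_init_statistics_buffersize and then
// stores the actual encoded length back into groups[idx].num_chunks.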
__global__ void __launch_bounds__(encode_threads_per_block)
gpu_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
__shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block];
uint32_t t = threadIdx.x;
uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y;
stats_state_s* const s = &state_g[threadIdx.y];
// Encode and update actual bfr size
if (idx < statistics_count && t == 0) {
s->chunk = chunks[idx];
s->group = groups[idx];
s->stats_dtype = s->group.stats_dtype;
s->base = blob_bfr + s->group.start_chunk;
s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks;
uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls);
uint8_t* fld_start = cur;
switch (s->stats_dtype) {
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_int64:
// intStatistics = 2
// message IntegerStatistics {
// optional sint64 minimum = 1;
// optional sint64 maximum = 2;
// optional sint64 sum = 3;
// }
if (s->chunk.has_minmax || s->chunk.has_sum) {
*cur = 2 * 8 + ProtofType::FIXEDLEN;
cur += 2;
if (s->chunk.has_minmax) {
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
}
if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); }
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_float32:
case dtype_float64:
// doubleStatistics = 3
// message DoubleStatistics {
// optional double minimum = 1;
// optional double maximum = 2;
// optional double sum = 3;
// }
if (s->chunk.has_minmax) {
*cur = 3 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val);
cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_string:
// stringStatistics = 4
// message StringStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional sint64 sum = 3; // sum will store the total length of all strings
// }
if (s->chunk.has_minmax && s->chunk.has_sum) {
uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) +
(pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) +
(pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) +
s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length;
cur[0] = 4 * 8 + ProtofType::FIXEDLEN;
cur = pb_encode_uint(cur + 1, sz);
cur = pb_put_binary(
cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length);
cur = pb_put_binary(
cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length);
cur = pb_put_uint(cur, 3, s->chunk.sum.i_val);
}
break;
case dtype_bool:
// bucketStatistics = 5
// message BucketStatistics {
// repeated uint64 count = 1 [packed=true];
// }
if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values
cur[0] = 5 * 8 + ProtofType::FIXEDLEN;
cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_decimal64:
case dtype_decimal128:
// decimalStatistics = 6
// message DecimalStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional string sum = 3;
// }
if (s->chunk.has_minmax) {
// TODO: Decimal support (decimal min/max stored as strings)
}
break;
case dtype_date32:
// dateStatistics = 7
// message DateStatistics { // min,max values saved as days since epoch
// optional sint32 minimum = 1;
// optional sint32 maximum = 2;
// }
if (s->chunk.has_minmax) {
cur[0] = 7 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_timestamp64:
// timestampStatistics = 9
// message TimestampStatistics {
// optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch
// optional sint64 maximum = 2;
// optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch
// optional sint64 maximumUtc = 4;
// }
if (s->chunk.has_minmax) {
cur[0] = 9 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc
cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc
fld_start[1] = cur - (fld_start + 2);
}
break;
default: break;
}
groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base);
}
}
void orc_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block,
rowgroup_bounds.size().second);
dim3 dim_block(init_threads_per_group, init_groups_per_block);
hipLaunchKernelGGL(( gpu_init_statistics_groups), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
groups, cols, rowgroup_bounds);
}
/**
* @brief Launches kernels to return statistics buffer offsets and sizes
*
* @param[in,out] groups Statistics merge groups
* @param[in] chunks Statistics chunks
* @param[in] statistics_count Number of statistics buffers to encode
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void orc_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpu_init_statistics_buffersize<block_size>)
, dim3(1), dim3(block_size), 0, stream.value(), groups, chunks, statistics_count);
}
/**
* @brief Launches kernel to encode statistics in ORC protobuf format
*
* @param[out] blob_bfr Output buffer for statistics blobs
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*/
void orc_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
unsigned int num_blocks =
(statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block;
dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block);
hipLaunchKernelGGL(( gpu_encode_statistics), dim3(num_blocks), dim3(dim_block), 0, stream.value(),
blob_bfr, groups, chunks, statistics_count);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| 3a14c3bfe509b6be2e7f1e9df8480ee4c3e9721f.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.hpp"
#include "orc_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr unsigned int init_threads_per_group = 32;
constexpr unsigned int init_groups_per_block = 4;
constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block;
__global__ void __launch_bounds__(init_threads_per_block)
gpu_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds)
{
__shared__ __align__(4) statistics_group group_g[init_groups_per_block];
uint32_t const col_id = blockIdx.y;
uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y;
uint32_t const t = threadIdx.x;
auto const num_rowgroups = rowgroup_bounds.size().first;
statistics_group* group = &group_g[threadIdx.y];
if (chunk_id < num_rowgroups and t == 0) {
group->col = &cols[col_id];
group->start_row = rowgroup_bounds[chunk_id][col_id].begin;
group->num_rows = rowgroup_bounds[chunk_id][col_id].size();
groups[col_id * num_rowgroups + chunk_id] = *group;
}
}
/**
* @brief Get the buffer size and offsets of encoded statistics
*
* @param[in,out] groups Statistics merge groups
* @param[in] statistics_count Number of statistics buffers
*/
constexpr unsigned int buffersize_reduction_dim = 32;
constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim;
constexpr unsigned int pb_fld_hdrlen = 1;
constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length
constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length
constexpr unsigned int pb_fldlen_int64 = 10;
constexpr unsigned int pb_fldlen_float64 = 8;
constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters
constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64;
constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64;
template <unsigned int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpu_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ typename block_scan::TempStorage temp_storage;
volatile uint32_t stats_size = 0;
uint32_t t = threadIdx.x;
__syncthreads();
for (uint32_t start = 0; start < statistics_count; start += block_size) {
uint32_t stats_len = 0, stats_pos;
uint32_t idx = start + t;
if (idx < statistics_count) {
statistics_dtype const dtype = groups[idx].stats_dtype;
switch (dtype) {
case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_int64:
case dtype_timestamp64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64);
break;
case dtype_float32:
case dtype_float64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64);
break;
case dtype_decimal64:
case dtype_decimal128:
stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal);
break;
case dtype_string:
stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) +
chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length;
break;
case dtype_none: stats_len = pb_fldlen_common;
default: break;
}
}
uint32_t tmp_stats_size;
block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size);
stats_pos += stats_size;
stats_size += tmp_stats_size;
if (idx < statistics_count) {
groups[idx].start_chunk = stats_pos;
groups[idx].num_chunks = stats_len;
}
__syncthreads();
}
}
struct stats_state_s {
uint8_t* base; ///< Output buffer start
uint8_t* end; ///< Output buffer end
statistics_chunk chunk;
statistics_merge_group group;
statistics_dtype stats_dtype; //!< Statistics data type for this column
// ORC stats
uint64_t numberOfValues;
uint8_t hasNull;
};
/*
* Protobuf encoding - see
* https://developers.google.com/protocol-buffers/docs/encoding
*/
// Protobuf varint encoding for unsigned int
__device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v)
{
while (v > 0x7f) {
*p++ = ((uint32_t)v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
// Protobuf field encoding for unsigned int
__device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v)
{
p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16
return pb_encode_uint(p + 1, v);
}
// Protobuf field encoding for signed int
__device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v)
{
int64_t s = (v < 0);
return pb_put_uint(p, id, (v ^ -s) * 2 + s);
}
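// pb_put_int above uses protobuf ZigZag encoding: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
// so small negative values still encode to short varints.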
// Protobuf field encoding for 'packed' unsigned int (single value)
__device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v)
{
uint8_t* p2 = pb_encode_uint(p + 2, v);
p[0] = id * 8 + ProtofType::FIXEDLEN;
p[1] = static_cast<uint8_t>(p2 - (p + 2));
return p2;
}
// Protobuf field encoding for binary/string
__device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len)
{
p[0] = id * 8 + ProtofType::FIXEDLEN;
p = pb_encode_uint(p + 1, len);
memcpy(p, bytes, len);
return p + len;
}
// Protobuf field encoding for 64-bit raw encoding (double)
__device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64)
{
p[0] = id * 8 + ProtofType::FIXED64;
memcpy(p + 1, raw64, 8);
return p + 9;
}
/**
* @brief Encode statistics in ORC protobuf format
*
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*
* ORC statistics format from https://orc.apache.org/specification/ORCv1/
*
* message ColumnStatistics {
* // the number of values
* optional uint64 numberOfValues = 1;
* // At most one of these has a value for any column
* optional IntegerStatistics intStatistics = 2;
* optional DoubleStatistics doubleStatistics = 3;
* optional StringStatistics stringStatistics = 4;
* optional BucketStatistics bucketStatistics = 5;
* optional DecimalStatistics decimalStatistics = 6;
* optional DateStatistics dateStatistics = 7;
* optional BinaryStatistics binaryStatistics = 8;
* optional TimestampStatistics timestampStatistics = 9;
* optional bool hasNull = 10;
* }
*/
constexpr unsigned int encode_threads_per_chunk = 32;
constexpr unsigned int encode_chunks_per_block = 4;
constexpr unsigned int encode_threads_per_block =
encode_threads_per_chunk * encode_chunks_per_block;
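// Each block encodes encode_chunks_per_block chunks (selected by threadIdx.y); within each
// chunk's 32-thread group only threadIdx.x == 0 performs the serialization.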
__global__ void __launch_bounds__(encode_threads_per_block)
gpu_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
__shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block];
uint32_t t = threadIdx.x;
uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y;
stats_state_s* const s = &state_g[threadIdx.y];
// Encode and update actual bfr size
if (idx < statistics_count && t == 0) {
s->chunk = chunks[idx];
s->group = groups[idx];
s->stats_dtype = s->group.stats_dtype;
s->base = blob_bfr + s->group.start_chunk;
s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks;
uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls);
uint8_t* fld_start = cur;
switch (s->stats_dtype) {
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_int64:
// intStatistics = 2
// message IntegerStatistics {
// optional sint64 minimum = 1;
// optional sint64 maximum = 2;
// optional sint64 sum = 3;
// }
if (s->chunk.has_minmax || s->chunk.has_sum) {
*cur = 2 * 8 + ProtofType::FIXEDLEN;
cur += 2;
if (s->chunk.has_minmax) {
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
}
if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); }
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_float32:
case dtype_float64:
// doubleStatistics = 3
// message DoubleStatistics {
// optional double minimum = 1;
// optional double maximum = 2;
// optional double sum = 3;
// }
if (s->chunk.has_minmax) {
*cur = 3 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val);
cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_string:
// stringStatistics = 4
// message StringStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional sint64 sum = 3; // sum will store the total length of all strings
// }
if (s->chunk.has_minmax && s->chunk.has_sum) {
uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) +
(pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) +
(pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) +
s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length;
cur[0] = 4 * 8 + ProtofType::FIXEDLEN;
cur = pb_encode_uint(cur + 1, sz);
cur = pb_put_binary(
cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length);
cur = pb_put_binary(
cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length);
cur = pb_put_uint(cur, 3, s->chunk.sum.i_val);
}
break;
case dtype_bool:
// bucketStatistics = 5
// message BucketStatistics {
// repeated uint64 count = 1 [packed=true];
// }
if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values
cur[0] = 5 * 8 + ProtofType::FIXEDLEN;
cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_decimal64:
case dtype_decimal128:
// decimalStatistics = 6
// message DecimalStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional string sum = 3;
// }
if (s->chunk.has_minmax) {
// TODO: Decimal support (decimal min/max stored as strings)
}
break;
case dtype_date32:
// dateStatistics = 7
// message DateStatistics { // min,max values saved as days since epoch
// optional sint32 minimum = 1;
// optional sint32 maximum = 2;
// }
if (s->chunk.has_minmax) {
cur[0] = 7 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_timestamp64:
// timestampStatistics = 9
// message TimestampStatistics {
// optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch
// optional sint64 maximum = 2;
// optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch
// optional sint64 maximumUtc = 4;
// }
if (s->chunk.has_minmax) {
cur[0] = 9 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc
cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc
fld_start[1] = cur - (fld_start + 2);
}
break;
default: break;
}
groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base);
}
}
void orc_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block,
rowgroup_bounds.size().second);
dim3 dim_block(init_threads_per_group, init_groups_per_block);
gpu_init_statistics_groups<<<dim_grid, dim_block, 0, stream.value()>>>(
groups, cols, rowgroup_bounds);
}
/**
* @brief Launches kernels to return statistics buffer offsets and sizes
*
* @param[in,out] groups Statistics merge groups
* @param[in] chunks Statistics chunks
* @param[in] statistics_count Number of statistics buffers to encode
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void orc_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
gpu_init_statistics_buffersize<block_size>
<<<1, block_size, 0, stream.value()>>>(groups, chunks, statistics_count);
}
/**
* @brief Launches kernel to encode statistics in ORC protobuf format
*
* @param[out] blob_bfr Output buffer for statistics blobs
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*/
void orc_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
unsigned int num_blocks =
(statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block;
dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block);
gpu_encode_statistics<<<num_blocks, dim_block, 0, stream.value()>>>(
blob_bfr, groups, chunks, statistics_count);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
df3c72a8fa2fe405281cdf194310c3380ccbb8f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
/***************************************************************************//**
Purpose
-------
CTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ctrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_ctrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex const * const *dA_array, magma_int_t ldda,
magmaFloatComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_claset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If someone wants to use cudaMemset instead, the whole vector of the initial
    // size must be set; otherwise it is a bug, and dinvA_length would have to be
    // passed as an input parameter. This has been tested and was slower.
    // If dinvA was not allocated at the largest size computed by the high-level
    // getrf_batched API, that is a bug and magmablas_claset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ctrtri_diag_lower_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ctrtri_diag_upper_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
| df3c72a8fa2fe405281cdf194310c3380ccbb8f6.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
/***************************************************************************//**
Purpose
-------
CTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ctrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_ctrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex const * const *dA_array, magma_int_t ldda,
magmaFloatComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_claset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If someone wants to use cudaMemset instead, the whole vector of the initial
    // size must be set; otherwise it is a bug, and dinvA_length would have to be
    // passed as an input parameter. This has been tested and was slower.
    // If dinvA was not allocated at the largest size computed by the high-level
    // getrf_batched API, that is a bug and magmablas_claset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ctrtri_diag_lower_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_cgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_cgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_cgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_cgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ctrtri_diag_upper_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_cgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_cgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_cgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_cgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_cgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
9a7cd18bf9ab19a91bd20c6a069875b71af216e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string.h>
#include <math.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
#include "inputs.h"
#include "callbacks.cu"
#include "helperfunctions.h"
/*******************************
Main function
*********************************/
int main(int argc, char **argv){
//Host variables
hipfftDoubleComplex *small_cube;
hipfftDoubleComplex *result;
int count;
int correct;
//Device variables
hipfftDoubleComplex *d_result;
hipfftDoubleComplex *d_a;
int final_samples;
final_samples = (K*K + (NX*NY - K*K)/(DS*DS))*K + (NX*NY/(DS*DS))*(NZ-K)/DS;
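    // This formula appears to count: for the first K z-planes, the K*K cube footprint at full
    // resolution plus the rest of each NX*NY plane downsampled by DS in x and y; for the
    // remaining NZ-K planes, samples downsampled by DS in x, y and z.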
hipfftDoubleComplex* unsampled_result;
//allocating host side arrays
result = (hipfftDoubleComplex*)malloc(sizeof(hipfftDoubleComplex)*(final_samples));
unsampled_result=(hipfftDoubleComplex*)malloc(sizeof(hipfftDoubleComplex)*NX*NY*((NZ-K)/DS));
small_cube = (hipfftDoubleComplex*)malloc(sizeof(hipfftDoubleComplex)*(K*K*K));
    // Choosing the CUDA device with the newer architecture
//int dev = findCudaDevice(argc, (const char **)argv);
//allocating device side arrays
hipMalloc((void**)&d_a, sizeof(hipfftDoubleComplex)*K*K*K);
if (hipGetLastError() != hipSuccess){
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
//**TEMPORARY** the output is going to materialize the full cube for simplicity
hipMalloc((void**)&d_result, sizeof(hipfftDoubleComplex)*final_samples);
if (hipGetLastError() != hipSuccess){
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
cout<<"creating data"<<endl;
//create small data cube inside larger data cube
count = 0;
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
small_cube[K*K*i + K*j + k].x = i*j*k + 0.3; //same value as data
small_cube[K*K*i + K*j + k].y=0;
}}}
/*
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
cout<< "data " << data[NX*NY*i + NX*j + k ].x << endl;
}}}
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
cout<< "small cube " <<small_cube[K*K*i + K*j + k].x << endl;
}}}
*/
// Running cuFFT
cout << "Run cufft" <<endl;
auto start = high_resolution_clock::now();
hipError_t cudaStatus = minibatch_CuFFT(argc, argv, small_cube, result, d_a, d_result, unsampled_result);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "CuFFT failed!");
return 1;
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
/*
cout<< "copy result into double array"<< endl;
//put result in cufft_output
count = 0;
for(int i=0;i<NZ;i++){
for(int j=0;j<NY;j++){
for(int k=0;k<NX;k++){
cufft_output[count]= result[NX*NY*i + NX*j + k ].x;
cufft_output[count+1] = result[NX*NY*i + NX*j + k].y;
count = count + 2;
}}}
*/
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
if(TO_PRINT==1){
printResult(result, final_samples);
cout<< "CUFFT unsampled first plane"<<endl;
count = 0;
while(count<NX*NY){
cout<< count << ": CUFFT :" << unsampled_result[count].x <<"," << unsampled_result[count].y << endl;
count = count + 1;
}
}
else{
//output is too large, only print few values
cout<< "First few values of CUFFT output"<<endl;
count = 0;
while(count<20){
cout<< count << ": CUFFT:" << unsampled_result[count].x <<"," << unsampled_result[count].y << endl;
count = count + 1;
}
}
cout << double(duration.count())/1000000 << "Seconds" << endl;
    free(result);
    free(small_cube);
return 0;
}
| 9a7cd18bf9ab19a91bd20c6a069875b71af216e0.cu | #include <iostream>
#include <string.h>
#include <math.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
#include "inputs.h"
#include "callbacks.cu"
#include "helperfunctions.h"
/*******************************
Main function
*********************************/
int main(int argc, char **argv){
//Host variables
cufftDoubleComplex *small_cube;
cufftDoubleComplex *result;
int count;
int correct;
//Device variables
cufftDoubleComplex *d_result;
cufftDoubleComplex *d_a;
int final_samples;
final_samples = (K*K + (NX*NY - K*K)/(DS*DS))*K + (NX*NY/(DS*DS))*(NZ-K)/DS;
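    // This formula appears to count: for the first K z-planes, the K*K cube footprint at full
    // resolution plus the rest of each NX*NY plane downsampled by DS in x and y; for the
    // remaining NZ-K planes, samples downsampled by DS in x, y and z.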
cufftDoubleComplex* unsampled_result;
//allocating host side arrays
result = (cufftDoubleComplex*)malloc(sizeof(cufftDoubleComplex)*(final_samples));
unsampled_result=(cufftDoubleComplex*)malloc(sizeof(cufftDoubleComplex)*NX*NY*((NZ-K)/DS));
small_cube = (cufftDoubleComplex*)malloc(sizeof(cufftDoubleComplex)*(K*K*K));
    // Choosing the CUDA device with the newer architecture
//int dev = findCudaDevice(argc, (const char **)argv);
//allocating device side arrays
cudaMalloc((void**)&d_a, sizeof(cufftDoubleComplex)*K*K*K);
if (cudaGetLastError() != cudaSuccess){
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
//**TEMPORARY** the output is going to materialize the full cube for simplicity
cudaMalloc((void**)&d_result, sizeof(cufftDoubleComplex)*final_samples);
if (cudaGetLastError() != cudaSuccess){
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
cout<<"creating data"<<endl;
//create small data cube inside larger data cube
count = 0;
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
small_cube[K*K*i + K*j + k].x = i*j*k + 0.3; //same value as data
small_cube[K*K*i + K*j + k].y=0;
}}}
/*
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
cout<< "data " << data[NX*NY*i + NX*j + k ].x << endl;
}}}
for(int i=0;i<K;i++){
for(int j=0;j<K;j++){
for(int k=0;k<K;k++){
cout<< "small cube " <<small_cube[K*K*i + K*j + k].x << endl;
}}}
*/
// Running cuFFT
cout << "Run cufft" <<endl;
auto start = high_resolution_clock::now();
cudaError_t cudaStatus = minibatch_CuFFT(argc, argv, small_cube, result, d_a, d_result, unsampled_result);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "CuFFT failed!");
return 1;
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
/*
cout<< "copy result into double array"<< endl;
//put result in cufft_output
count = 0;
for(int i=0;i<NZ;i++){
for(int j=0;j<NY;j++){
for(int k=0;k<NX;k++){
cufft_output[count]= result[NX*NY*i + NX*j + k ].x;
cufft_output[count+1] = result[NX*NY*i + NX*j + k].y;
count = count + 2;
}}}
*/
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
if(TO_PRINT==1){
printResult(result, final_samples);
cout<< "CUFFT unsampled first plane"<<endl;
count = 0;
while(count<NX*NY){
cout<< count << ": CUFFT :" << unsampled_result[count].x <<"," << unsampled_result[count].y << endl;
count = count + 1;
}
}
else{
//output is too large, only print few values
cout<< "First few values of CUFFT output"<<endl;
count = 0;
while(count<20){
cout<< count << ": CUFFT:" << unsampled_result[count].x <<"," << unsampled_result[count].y << endl;
count = count + 1;
}
}
cout << double(duration.count())/1000000 << "Seconds" << endl;
    free(result);
    free(small_cube);
return 0;
}
|
d052ba9cfc19d938636b89a09166f6bec0250cee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "elementwise_1D_1D_log.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL((elementwise_1D_1D_log), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size);
hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((elementwise_1D_1D_log), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size);
}
auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((elementwise_1D_1D_log), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d052ba9cfc19d938636b89a09166f6bec0250cee.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "elementwise_1D_1D_log.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
elementwise_1D_1D_log<<<gridBlock,threadBlock>>>(in,out,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
elementwise_1D_1D_log<<<gridBlock,threadBlock>>>(in,out,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
elementwise_1D_1D_log<<<gridBlock,threadBlock>>>(in,out,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b4406f006beca985767bc5c26bd6afbd3fd94c21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define ARRAY_SIZE 1000000
#define TPB 256
#define MARGIN 1e-6
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
float* saxpy(float* x, float* y, float a) {
int i = 0;
float* res = (float *)malloc(ARRAY_SIZE*sizeof(float));
for(i = 0; i < ARRAY_SIZE; i++) {
res[i] = a*x[i]+y[i];
}
return res;
}
__global__ void saxpy_gpu(float* res, float* x, float* y, float a) {
const int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < ARRAY_SIZE) {
res[id] = a*x[id]+y[id];
}
}
int main() {
float* res;
float* res2 = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* x = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* y = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* res_gpu;
float* d_x;
float* d_y;
float a = 10;
double t1;
double t2;
double timeCPU;
double timeGPU;
printf("Filling arrays...\n");
for(int i = 0; i < ARRAY_SIZE; i++)
{
x[i] = i;
y[i] = 2*i;
}
printf("Done!\n");
printf("Computing in CPU...\n");
t1 = cpuSecond();
res = saxpy(x, y, a);
t2 = cpuSecond();
timeCPU = t2 - t1;
printf("Done! %f s\n", timeCPU);
printf("Computing in GPU...\n");
hipMalloc(&res_gpu, ARRAY_SIZE*sizeof(float));
hipMalloc(&d_x, ARRAY_SIZE*sizeof(float));
hipMalloc(&d_y, ARRAY_SIZE*sizeof(float));
hipMemcpy(d_x, x, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
t1 = cpuSecond();
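    // (ARRAY_SIZE+TPB-1)/TPB rounds the grid size up so every element gets a thread;
    // the id < ARRAY_SIZE check in saxpy_gpu guards the spare threads of the last block.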
hipLaunchKernelGGL(( saxpy_gpu), dim3((ARRAY_SIZE+TPB-1)/TPB), dim3(TPB), 0, 0, res_gpu, d_x, d_y, a);
hipDeviceSynchronize();
t2 = cpuSecond();
timeGPU = t2 - t1;
printf("Done! %f s\n", timeGPU);
hipMemcpy(res2, res_gpu, ARRAY_SIZE*sizeof(float), hipMemcpyDeviceToHost);
hipFree(res_gpu);
for(int i = 0; i<ARRAY_SIZE; i++) {
//printf("%d -> %f \t %f\n", i, res[i], res2[i]);
        if(res[i] - res2[i] > MARGIN || res2[i] - res[i] > MARGIN) {
printf("This is bad, %d\n", i);
exit(0);
}
}
printf("Hurray!\n");
} | b4406f006beca985767bc5c26bd6afbd3fd94c21.cu | #include <stdio.h>
#include <sys/time.h>
#define ARRAY_SIZE 1000000
#define TPB 256
#define MARGIN 1e-6
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
float* saxpy(float* x, float* y, float a) {
int i = 0;
float* res = (float *)malloc(ARRAY_SIZE*sizeof(float));
for(i = 0; i < ARRAY_SIZE; i++) {
res[i] = a*x[i]+y[i];
}
return res;
}
__global__ void saxpy_gpu(float* res, float* x, float* y, float a) {
const int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < ARRAY_SIZE) {
res[id] = a*x[id]+y[id];
}
}
int main() {
float* res;
float* res2 = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* x = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* y = (float *)malloc(ARRAY_SIZE*sizeof(float));
float* res_gpu;
float* d_x;
float* d_y;
float a = 10;
double t1;
double t2;
double timeCPU;
double timeGPU;
printf("Filling arrays...\n");
for(int i = 0; i < ARRAY_SIZE; i++)
{
x[i] = i;
y[i] = 2*i;
}
printf("Done!\n");
printf("Computing in CPU...\n");
t1 = cpuSecond();
res = saxpy(x, y, a);
t2 = cpuSecond();
timeCPU = t2 - t1;
printf("Done! %f s\n", timeCPU);
printf("Computing in GPU...\n");
cudaMalloc(&res_gpu, ARRAY_SIZE*sizeof(float));
cudaMalloc(&d_x, ARRAY_SIZE*sizeof(float));
cudaMalloc(&d_y, ARRAY_SIZE*sizeof(float));
cudaMemcpy(d_x, x, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
t1 = cpuSecond();
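    // (ARRAY_SIZE+TPB-1)/TPB rounds the grid size up so every element gets a thread;
    // the id < ARRAY_SIZE check in saxpy_gpu guards the spare threads of the last block.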
saxpy_gpu<<<(ARRAY_SIZE+TPB-1)/TPB, TPB>>>(res_gpu, d_x, d_y, a);
cudaDeviceSynchronize();
t2 = cpuSecond();
timeGPU = t2 - t1;
printf("Done! %f s\n", timeGPU);
cudaMemcpy(res2, res_gpu, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(res_gpu);
for(int i = 0; i<ARRAY_SIZE; i++) {
//printf("%d -> %f \t %f\n", i, res[i], res2[i]);
        if(res[i] - res2[i] > MARGIN || res2[i] - res[i] > MARGIN) {
printf("This is bad, %d\n", i);
exit(0);
}
}
printf("Hurray!\n");
} |
bbab5f98b0b59d18dc375e039ff060e19cf0ee08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
struct Pair {
__device__ __forceinline__ Pair() {}
__device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {}
__device__ __forceinline__ void set(T value, int64_t id) {
v = value;
    this->id = id;
}
__device__ __forceinline__ void operator=(const Pair<T>& in) {
v = in.v;
id = in.id;
}
__device__ __forceinline__ bool operator<(const T value) const {
return (v < value);
}
__device__ __forceinline__ bool operator<(const Pair<T>& in) const {
return (v < in.v) || ((v == in.v) && (id > in.id));
}
__device__ __forceinline__ bool operator>(const Pair<T>& in) const {
return (v > in.v) || ((v == in.v) && (id < in.id));
}
T v;
int64_t id;
};
template <typename T>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
int beam_size) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int beam_size>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* src,
bool& firstStep, bool& is_empty,
Pair<T>& max, int dim,
const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, src, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* val,
int* col, bool& firstStep,
bool& is_empty, Pair<T>& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
Pair<T> topk[], T** topVal,
int64_t** topIds, int& beam, int& k,
const int tid, const int warp) {
while (true) {
__syncthreads();
if (tid < BlockSize / 2) {
if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
maxid[tid] = tid + BlockSize / 2;
} else {
maxid[tid] = tid;
}
}
__syncthreads();
for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
maxid[tid] = maxid[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = sh_topk[maxid[0]].v;
**topIds = sh_topk[maxid[0]].id;
(*topVal)++;
(*topIds)++;
}
if (tid == maxid[0]) beam++;
if (--k == 0) break;
__syncthreads();
if (tid == maxid[0]) {
if (beam < MaxLength) {
sh_topk[tid] = topk[beam];
}
}
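    // The warp holding the winning lane reads that lane's 'beam' via __shfl and breaks out
    // of the selection loop once that lane has consumed all MaxLength local candidates.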
if (maxid[0] / 32 == warp) {
if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
}
}
}
/**
 * Each block computes one sample.
 * In a block:
 * 1. every thread gets its top MaxLength values;
 * 2. merge them into sh_topk, block-reduce and get the max value;
 * 3. go to the second step, until one thread's topk value is null;
 * 4. go to the first step, until the top k values are obtained.
*/
template <typename T, int MaxLength, int BlockSize>
__global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices,
const T* src, int lds, int dim, int k) {
__shared__ Pair<T> sh_topk[BlockSize];
__shared__ int maxid[BlockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
output += blockIdx.x * output_stride;
indices += blockIdx.x * k;
Pair<T> topk[MaxLength];
int beam = MaxLength;
Pair<T> max;
bool is_empty = false;
bool firststep = true;
for (int k = 0; k < MaxLength; k++) {
topk[k].set(-INFINITY, -1);
}
while (k) {
ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
src + blockIdx.x * lds, firststep,
is_empty, max, dim, tid);
sh_topk[tid] = topk[0];
BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
&indices, beam, k, tid, warp);
}
}
template <typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
size_t k = static_cast<int>(ctx.Attr<int>("k"));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
size_t input_height = input->dims()[0];
size_t input_width = input->dims()[1];
if (k > input_width) k = input_width;
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
// TODO(typhoonzero): refine this kernel.
dim3 threads(256, 1);
dim3 grid(input_height, 1);
hipLaunchKernelGGL(( KeMatrixTopK<T, 5, 256>),
dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), output_data, output->dims()[1],
indices_data, input_data,
input_width, input_width, int(k));
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
| bbab5f98b0b59d18dc375e039ff060e19cf0ee08.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
struct Pair {
__device__ __forceinline__ Pair() {}
__device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {}
__device__ __forceinline__ void set(T value, int64_t id) {
v = value;
    this->id = id;
}
__device__ __forceinline__ void operator=(const Pair<T>& in) {
v = in.v;
id = in.id;
}
__device__ __forceinline__ bool operator<(const T value) const {
return (v < value);
}
__device__ __forceinline__ bool operator<(const Pair<T>& in) const {
return (v < in.v) || ((v == in.v) && (id > in.id));
}
__device__ __forceinline__ bool operator>(const Pair<T>& in) const {
return (v > in.v) || ((v == in.v) && (id < in.id));
}
T v;
int64_t id;
};
template <typename T>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
int beam_size) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int beam_size>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* src,
bool& firstStep, bool& is_empty,
Pair<T>& max, int dim,
const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, src, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* val,
int* col, bool& firstStep,
bool& is_empty, Pair<T>& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
Pair<T> topk[], T** topVal,
int64_t** topIds, int& beam, int& k,
const int tid, const int warp) {
while (true) {
__syncthreads();
if (tid < BlockSize / 2) {
if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
maxid[tid] = tid + BlockSize / 2;
} else {
maxid[tid] = tid;
}
}
__syncthreads();
for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
maxid[tid] = maxid[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = sh_topk[maxid[0]].v;
**topIds = sh_topk[maxid[0]].id;
(*topVal)++;
(*topIds)++;
}
if (tid == maxid[0]) beam++;
if (--k == 0) break;
__syncthreads();
if (tid == maxid[0]) {
if (beam < MaxLength) {
sh_topk[tid] = topk[beam];
}
}
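    // The warp holding the winning lane reads that lane's 'beam' via __shfl and breaks out
    // of the selection loop once that lane has consumed all MaxLength local candidates.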
if (maxid[0] / 32 == warp) {
if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
}
}
}
/**
 * Each block computes one sample.
 * In a block:
 * 1. every thread gets its top MaxLength values;
 * 2. merge them into sh_topk, block-reduce and get the max value;
 * 3. go to the second step, until one thread's topk value is null;
 * 4. go to the first step, until the top k values are obtained.
*/
template <typename T, int MaxLength, int BlockSize>
__global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices,
const T* src, int lds, int dim, int k) {
__shared__ Pair<T> sh_topk[BlockSize];
__shared__ int maxid[BlockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
output += blockIdx.x * output_stride;
indices += blockIdx.x * k;
Pair<T> topk[MaxLength];
int beam = MaxLength;
Pair<T> max;
bool is_empty = false;
bool firststep = true;
for (int k = 0; k < MaxLength; k++) {
topk[k].set(-INFINITY, -1);
}
while (k) {
ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
src + blockIdx.x * lds, firststep,
is_empty, max, dim, tid);
sh_topk[tid] = topk[0];
BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
&indices, beam, k, tid, warp);
}
}
template <typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
size_t k = static_cast<int>(ctx.Attr<int>("k"));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
size_t input_height = input->dims()[0];
size_t input_width = input->dims()[1];
if (k > input_width) k = input_width;
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
// TODO(typhoonzero): refine this kernel.
dim3 threads(256, 1);
dim3 grid(input_height, 1);
KeMatrixTopK<T, 5, 256><<<
grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(output_data, output->dims()[1],
indices_data, input_data,
input_width, input_width, int(k));
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
|
53533942c35c85a83ce93c4ad7312164cc8a93d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
__global__ void simple_kernel(){
printf("Hello from kernel!\n");
}
int main(){
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if(deviceProp.concurrentKernels == 0){
printf("GPU doesn't support concurrent execution\n");
printf("Kernel exec will be serialized\n");
}
hipStream_t str1, str2, str3;
hipStreamCreate(&str1);
hipStreamCreate(&str2);
hipStreamCreate(&str3);
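    // The three launches below target distinct non-default streams, so they may overlap on
    // devices that report concurrentKernels support.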
hipLaunchKernelGGL(( simple_kernel) , dim3(1), dim3(1), 0, str1, );
hipLaunchKernelGGL(( simple_kernel) , dim3(1), dim3(1), 0, str2, );
hipLaunchKernelGGL(( simple_kernel) , dim3(1), dim3(1), 0, str3, );
hipStreamDestroy(str1);
hipStreamDestroy(str2);
hipStreamDestroy(str3);
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| 53533942c35c85a83ce93c4ad7312164cc8a93d6.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
__global__ void simple_kernel(){
printf("Hello from kernel!\n");
}
int main(){
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if(deviceProp.concurrentKernels == 0){
printf("GPU doesn't support concurrent execution\n");
printf("Kernel exec will be serialized\n");
}
cudaStream_t str1, str2, str3;
cudaStreamCreate(&str1);
cudaStreamCreate(&str2);
cudaStreamCreate(&str3);
simple_kernel <<<1, 1, 0, str1>>>();
simple_kernel <<<1, 1, 0, str2>>>();
simple_kernel <<<1, 1, 0, str3>>>();
cudaStreamDestroy(str1);
cudaStreamDestroy(str2);
cudaStreamDestroy(str3);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
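// Illustrative sketch (not part of the original example above): one way to check
// whether kernels launched into different streams actually overlap. Each stream
// gets a kernel that busy-waits for a fixed cycle count; if the total wall-clock
// time is close to a single kernel's time the launches ran concurrently, while a
// roughly threefold time suggests serialization. The spin kernel and the cycle
// count are illustrative choices only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void spin(long long cycles) {
  long long start = clock64();
  while (clock64() - start < cycles) { /* busy wait */ }
}

int main() {
  const int n_streams = 3;
  const long long cycles = 100000000LL;  // tens of milliseconds on most GPUs
  cudaStream_t s[n_streams];
  for (int i = 0; i < n_streams; ++i) cudaStreamCreate(&s[i]);

  cudaEvent_t beg, end;
  cudaEventCreate(&beg);
  cudaEventCreate(&end);
  cudaEventRecord(beg);
  for (int i = 0; i < n_streams; ++i) spin<<<1, 1, 0, s[i]>>>(cycles);
  cudaEventRecord(end);
  cudaEventSynchronize(end);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, beg, end);
  printf("3 spin kernels in 3 streams took %.1f ms\n", ms);

  for (int i = 0; i < n_streams; ++i) cudaStreamDestroy(s[i]);
  cudaEventDestroy(beg);
  cudaEventDestroy(end);
  return 0;
}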
|
133731fc9b92a9ccbf1bb9ba0c8b4e365a222be8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2020 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <iostream>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "egblas/mse.hpp"
#include "egblas/cuda_check.hpp"
#include "egblas/utils.hpp"
#include "sum_reduce.hpp"
template<typename T>
__device__ T mse_loss(T output, T label){
return (label - output) * (label - output);
}
template<typename T>
__device__ T mse_error(T output, T label){
return fabsf(label - output);
}
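// Illustrative sketch (not part of the original source): the two helpers above
// define the per-element quantities that the kernels below sum up. mse_loss is
// the squared difference, while mse_error, despite the "mse" in its name, is the
// absolute difference. The tiny host program below evaluates both definitions on
// a hand-picked example to make that concrete.
#include <cmath>
#include <cstdio>

int main() {
  const float output[] = {0.9f, 0.2f, 0.4f, 0.8f};
  const float labels[] = {1.0f, 0.0f, 0.5f, 0.5f};
  float loss = 0.0f, error = 0.0f;
  for (int i = 0; i < 4; ++i) {
    loss  += (labels[i] - output[i]) * (labels[i] - output[i]);  // mse_loss term
    error += std::fabs(labels[i] - output[i]);                   // mse_error term
  }
  printf("summed squared error = %f, summed absolute error = %f\n", loss, error);
  return 0;
}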
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_loss_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i * incx];
if (i + blockSize < n) {
mySum += output[(i + blockSize) * incx];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE loss
while (i < n) {
mySum += mse_loss(output[i * incx], labels[i * incy]);
if (i + blockSize < n) {
                mySum += mse_loss(output[(i + blockSize) * incx], labels[(i + blockSize) * incy]);  // labels are indexed with their own stride
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_loss_kernel1(size_t n, const T* output, const T* labels, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i];
if (i + blockSize < n) {
mySum += output[i + blockSize];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE loss
while (i < n) {
mySum += mse_loss(output[i], labels[i]);
if (i + blockSize < n) {
mySum += mse_loss(output[i + blockSize], labels[i + blockSize]);
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_error_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i * incx];
if (i + blockSize < n) {
mySum += output[(i + blockSize) * incx];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE error
while (i < n) {
mySum += mse_error(output[i * incx], labels[i * incy]);
if (i + blockSize < n) {
                mySum += mse_error(output[(i + blockSize) * incx], labels[(i + blockSize) * incy]);  // labels are indexed with their own stride
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_error_kernel1(size_t n, const T* output, const T* labels, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i];
if (i + blockSize < n) {
mySum += output[i + blockSize];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE error
while (i < n) {
mySum += mse_error(output[i], labels[i]);
if (i + blockSize < n) {
mySum += mse_error(output[i + blockSize], labels[i + blockSize]);
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <typename T, bool Reduce>
void invoke_mse_loss_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output, size_t numThreads, size_t numBlocks) {
int sharedSize = (numThreads <= 32) ? 64 * sizeof(T) : numThreads * sizeof(T);
switch (numThreads) {
case 512:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 512, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 512, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 256:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 256, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 256, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 128:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 128, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 128, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 64:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 64, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 64, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 32:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 32, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 32, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 16:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 16, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 16, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 8:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 8, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 8, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 4:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 4, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 4, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 2:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 2, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 2, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 1:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_loss_kernel1<T, 1, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_loss_kernel<T, 1, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
}
}
template <typename T, bool Reduce>
void invoke_mse_error_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output, size_t numThreads, size_t numBlocks) {
int sharedSize = (numThreads <= 32) ? 64 * sizeof(T) : numThreads * sizeof(T);
switch (numThreads) {
case 512:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 512, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 512, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 256:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 256, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 256, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 128:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 128, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 128, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 64:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 64, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 64, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 32:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 32, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 32, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 16:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 16, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 16, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 8:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 8, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 8, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 4:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 4, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 4, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 2:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 2, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 2, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
case 1:
if (incx == 1 && incy == 1) {
hipLaunchKernelGGL(( mse_error_kernel1<T, 1, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, labels, r_output);
} else {
hipLaunchKernelGGL(( mse_error_kernel<T, 1, Reduce>), dim3(numBlocks), dim3(numThreads), sharedSize, 0, n, output, incx, labels, incy, r_output);
}
break;
}
}
template <bool Loss, typename T>
T mse_kernel_run(size_t n, const T* output, size_t incx, const T* labels, size_t incy) {
T result = 0;
const size_t cpu_threshold = Loss ? 1 : 1024;
if (Loss && n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(hipMemcpy(host_output, output, n * sizeof(T), hipMemcpyDeviceToHost));
cuda_check(hipMemcpy(host_labels, labels, n * sizeof(T), hipMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
result += (host_labels[i] - host_output[i]) * (host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return result;
}
if (!Loss && n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(hipMemcpy(host_output, output, n * sizeof(T), hipMemcpyDeviceToHost));
cuda_check(hipMemcpy(host_labels, labels, n * sizeof(T), hipMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
result += fabsf(host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return result;
}
const size_t maxThreads = 512;
const size_t maxBlocks = 64;
// Compute the launch configuration of the kernel
size_t numThreads = n < maxThreads * 2 ? nextPow2((n + 1) / 2) : maxThreads;
size_t numBlocks = ::min((n + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
// Allocate memory on the device
T* tmp_gpu;
cuda_check(hipMalloc((void**)&tmp_gpu, numBlocks * sizeof(T)));
// Run the first reduction on GPU
if (Loss) {
invoke_mse_loss_kernel<T, false>(n, output, incx, labels, incy, tmp_gpu, numThreads, numBlocks);
} else {
invoke_mse_error_kernel<T, false>(n, output, incx, labels, incy, tmp_gpu, numThreads, numBlocks);
}
size_t s = numBlocks;
// Run the following reductions on GPU
while(s > cpu_threshold){
// Compute again the configuration of the reduction kernel
numThreads = s < maxThreads * 2 ? nextPow2((s + 1) / 2) : maxThreads;
numBlocks = ::min((s + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
if (Loss) {
invoke_mse_loss_kernel<T, true>(s, tmp_gpu, 1, tmp_gpu, 1, tmp_gpu, numThreads, numBlocks);
} else {
invoke_mse_error_kernel<T, true>(s, tmp_gpu, 1, tmp_gpu, 1, tmp_gpu, numThreads, numBlocks);
}
s = (s + numThreads * 2 - 1) / (numThreads * 2);
}
if(s > 1){
T* host_data = new T[s];
cuda_check(hipMemcpy(host_data, tmp_gpu, s * sizeof(T), hipMemcpyDeviceToHost));
for (size_t i = 0; i < s; i++) {
result += host_data[i];
}
delete[] host_data;
} else {
cuda_check(hipMemcpy(&result, tmp_gpu, 1 * sizeof(T), hipMemcpyDeviceToHost));
}
cuda_check(hipFree(tmp_gpu));
return result;
}
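// Illustrative sketch (not part of the original source): the launch configuration
// above picks numThreads as the next power of two of (n+1)/2, capped at 512, and
// numBlocks so that each block covers 2*numThreads elements per pass, capped at
// 64. The host-only program below reproduces that arithmetic for a few sizes;
// next_pow2 is a local stand-in for nextPow2 from egblas/utils.hpp, assumed here
// to round up to a power of two.
#include <algorithm>
#include <cstdio>

static size_t next_pow2(size_t x) {
  size_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

int main() {
  const size_t max_threads = 512, max_blocks = 64;
  const size_t sizes[] = {100, 1500, 1u << 16, 1u << 24};
  for (size_t n : sizes) {
    size_t threads = n < max_threads * 2 ? next_pow2((n + 1) / 2) : max_threads;
    size_t blocks  = std::min((n + threads * 2 - 1) / (threads * 2), max_blocks);
    printf("n=%8zu -> %2zu block(s) x %3zu threads (2*%zu elements per block pass)\n",
           n, blocks, threads, threads);
  }
  return 0;
}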
float egblas_mse_sloss(size_t n, float alpha, const float* output, size_t incx, const float* labels, size_t incy) {
return alpha * mse_kernel_run<true>(n, output, incx, labels, incy);
}
double egblas_mse_dloss(size_t n, double alpha, const double* output, size_t incx, const double* labels, size_t incy) {
return alpha * mse_kernel_run<true>(n, output, incx, labels, incy);
}
float egblas_mse_serror(size_t n, float alpha, const float* output, size_t incx, const float* labels, size_t incy) {
return alpha * mse_kernel_run<false>(n, output, incx, labels, incy);
}
double egblas_mse_derror(size_t n, double alpha, const double* output, size_t incx, const double* labels, size_t incy) {
return alpha * mse_kernel_run<false>(n, output, incx, labels, incy);
}
template <typename T>
std::pair<T, T> mse_kernel_both_run(size_t n, const T* output, size_t incx, const T* labels, size_t incy) {
T loss = 0;
T error = 0;
const size_t cpu_threshold = 1024;
if (n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(hipMemcpy(host_output, output, n * sizeof(T), hipMemcpyDeviceToHost));
cuda_check(hipMemcpy(host_labels, labels, n * sizeof(T), hipMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
loss += (host_labels[i] - host_output[i]) * (host_labels[i] - host_output[i]);
error += fabsf(host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return std::make_pair(loss, error);
}
const size_t maxThreads = 512;
const size_t maxBlocks = 64;
// Compute the launch configuration of the kernel
size_t numThreads = n < maxThreads * 2 ? nextPow2((n + 1) / 2) : maxThreads;
size_t numBlocks = ::min((n + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
// Allocate memory on the device
T* tmp_gpu;
cuda_check(hipMalloc((void**)&tmp_gpu, 2 * numBlocks * sizeof(T)));
T* tmp_loss = tmp_gpu;
T* tmp_error = tmp_gpu + numBlocks;
// Run the first reduction on GPU
invoke_mse_loss_kernel<T, false>(n, output, incx, labels, incy, tmp_loss, numThreads, numBlocks);
invoke_mse_error_kernel<T, false>(n, output, incx, labels, incy, tmp_error, numThreads, numBlocks);
size_t s = numBlocks;
// Run the following reductions on GPU
while(s > cpu_threshold){
// Compute again the configuration of the reduction kernel
numThreads = s < maxThreads * 2 ? nextPow2((s + 1) / 2) : maxThreads;
numBlocks = ::min((s + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
invoke_mse_loss_kernel<T, true>(s, tmp_loss, 1, tmp_loss, 1, tmp_loss, numThreads, numBlocks);
invoke_mse_error_kernel<T, true>(s, tmp_error, 1, tmp_error, 1, tmp_error, numThreads, numBlocks);
s = (s + numThreads * 2 - 1) / (numThreads * 2);
}
if(s > 1){
T* host_data = new T[2 * numBlocks];
cuda_check(hipMemcpy(host_data, tmp_gpu, 2 * numBlocks * sizeof(T), hipMemcpyDeviceToHost));
for (size_t i = 0; i < s; i++) {
loss += host_data[i];
}
for (size_t i = 0; i < s; i++) {
error += host_data[numBlocks + i];
}
delete[] host_data;
} else {
cuda_check(hipMemcpy(&loss, tmp_loss, 1 * sizeof(T), hipMemcpyDeviceToHost));
cuda_check(hipMemcpy(&error, tmp_error, 1 * sizeof(T), hipMemcpyDeviceToHost));
}
cuda_check(hipFree(tmp_gpu));
return std::make_pair(loss, error);
}
std::pair<float, float> egblas_smse(size_t n, float alpha, float beta, const float* output, size_t incx, const float* labels, size_t incy) {
auto res = mse_kernel_both_run(n, output, incx, labels, incy);
return std::make_pair(alpha * res.first, beta * res.second);
}
std::pair<double, double> egblas_dmse(size_t n, double alpha, double beta, const double* output, size_t incx, const double* labels, size_t incy) {
auto res = mse_kernel_both_run(n, output, incx, labels, incy);
return std::make_pair(alpha * res.first, beta * res.second);
}
| 133731fc9b92a9ccbf1bb9ba0c8b4e365a222be8.cu | //=======================================================================
// Copyright (c) 2020 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <iostream>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "egblas/mse.hpp"
#include "egblas/cuda_check.hpp"
#include "egblas/utils.hpp"
#include "sum_reduce.hpp"
template<typename T>
__device__ T mse_loss(T output, T label){
return (label - output) * (label - output);
}
template<typename T>
__device__ T mse_error(T output, T label){
return fabsf(label - output);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_loss_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i * incx];
if (i + blockSize < n) {
mySum += output[(i + blockSize) * incx];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE loss
while (i < n) {
mySum += mse_loss(output[i * incx], labels[i * incy]);
if (i + blockSize < n) {
                mySum += mse_loss(output[(i + blockSize) * incx], labels[(i + blockSize) * incy]);  // labels are indexed with their own stride
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
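// Illustrative sketch (not part of the original source): the kernels in this file
// all share the same shape -- a grid-stride loop in which every thread accumulates
// two elements per step, followed by a shared-memory block reduction performed by
// sum_reduce_impl (defined in sum_reduce.hpp, not shown). Below is a stripped-down,
// self-contained version of that pattern for a plain sum, with a naive tree
// reduction standing in for sum_reduce_impl.
#include <cstdio>
#include <cuda_runtime.h>

template <int BlockSize>
__global__ void sum_kernel(size_t n, const float* x, float* block_sums) {
  __shared__ float sdata[BlockSize];
  size_t tid = threadIdx.x;
  size_t i = blockIdx.x * (2 * (size_t)BlockSize) + tid;
  size_t grid_size = 2 * (size_t)BlockSize * gridDim.x;

  // Grid-stride loop: the "i" / "i + blockSize" pair mirrors the kernels above.
  float my_sum = 0.0f;
  while (i < n) {
    my_sum += x[i];
    if (i + BlockSize < n) my_sum += x[i + BlockSize];
    i += grid_size;
  }
  sdata[tid] = my_sum;
  __syncthreads();

  // Naive tree reduction in shared memory (stand-in for sum_reduce_impl).
  for (int s = BlockSize / 2; s > 0; s >>= 1) {
    if (tid < (size_t)s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) block_sums[blockIdx.x] = sdata[0];
}

int main() {
  const size_t n = 1 << 20;
  const int block_size = 256, num_blocks = 64;
  float *x, *block_sums;
  cudaMallocManaged(&x, n * sizeof(float));
  cudaMallocManaged(&block_sums, num_blocks * sizeof(float));
  for (size_t i = 0; i < n; ++i) x[i] = 1.0f;

  sum_kernel<block_size><<<num_blocks, block_size>>>(n, x, block_sums);
  cudaDeviceSynchronize();

  float total = 0.0f;  // final pass on the host, as mse_kernel_run does for small s
  for (int b = 0; b < num_blocks; ++b) total += block_sums[b];
  printf("sum = %.0f (expected %zu)\n", total, n);

  cudaFree(x);
  cudaFree(block_sums);
  return 0;
}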
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_loss_kernel1(size_t n, const T* output, const T* labels, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i];
if (i + blockSize < n) {
mySum += output[i + blockSize];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE loss
while (i < n) {
mySum += mse_loss(output[i], labels[i]);
if (i + blockSize < n) {
mySum += mse_loss(output[i + blockSize], labels[i + blockSize]);
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_error_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i * incx];
if (i + blockSize < n) {
mySum += output[(i + blockSize) * incx];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE error
while (i < n) {
mySum += mse_error(output[i * incx], labels[i * incy]);
if (i + blockSize < n) {
                mySum += mse_error(output[(i + blockSize) * incx], labels[(i + blockSize) * incy]);  // labels are indexed with their own stride
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <class T, size_t blockSize, bool Reduce>
__global__ void mse_error_kernel1(size_t n, const T* output, const T* labels, T* r_output) {
extern __shared__ volatile unsigned char shared_data_raw[];
volatile T* shared_data = reinterpret_cast<volatile T*>(shared_data_raw);
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
size_t gridSize = blockSize * 2 * gridDim.x;
// Perform first level of reduction,
// reading from global memory and writing to shared memory
T mySum = 0;
if (Reduce) {
        // In case of reductions, this is a simple sum (labels are ignored)
while (i < n) {
mySum += output[i];
if (i + blockSize < n) {
mySum += output[i + blockSize];
}
i += gridSize;
}
} else {
// In the basic case, perform reduction and MSE error
while (i < n) {
mySum += mse_error(output[i], labels[i]);
if (i + blockSize < n) {
mySum += mse_error(output[i + blockSize], labels[i + blockSize]);
}
i += gridSize;
}
}
shared_data[tid] = mySum;
__syncthreads();
sum_reduce_impl<T, blockSize>(r_output, shared_data);
}
template <typename T, bool Reduce>
void invoke_mse_loss_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output, size_t numThreads, size_t numBlocks) {
int sharedSize = (numThreads <= 32) ? 64 * sizeof(T) : numThreads * sizeof(T);
switch (numThreads) {
case 512:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 512, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 512, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 256:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 256, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 256, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 128:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 128, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 128, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 64:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 64, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 64, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 32:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 32, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 32, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 16:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 16, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 16, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 8:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 8, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 8, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 4:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 4, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 4, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 2:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 2, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 2, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 1:
if (incx == 1 && incy == 1) {
mse_loss_kernel1<T, 1, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_loss_kernel<T, 1, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
}
}
template <typename T, bool Reduce>
void invoke_mse_error_kernel(size_t n, const T* output, size_t incx, const T* labels, size_t incy, T* r_output, size_t numThreads, size_t numBlocks) {
int sharedSize = (numThreads <= 32) ? 64 * sizeof(T) : numThreads * sizeof(T);
switch (numThreads) {
case 512:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 512, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 512, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 256:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 256, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 256, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 128:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 128, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 128, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 64:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 64, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 64, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 32:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 32, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 32, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 16:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 16, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 16, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 8:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 8, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 8, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 4:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 4, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 4, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 2:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 2, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 2, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
case 1:
if (incx == 1 && incy == 1) {
mse_error_kernel1<T, 1, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, labels, r_output);
} else {
mse_error_kernel<T, 1, Reduce><<<numBlocks, numThreads, sharedSize>>>(n, output, incx, labels, incy, r_output);
}
break;
}
}
template <bool Loss, typename T>
T mse_kernel_run(size_t n, const T* output, size_t incx, const T* labels, size_t incy) {
T result = 0;
const size_t cpu_threshold = Loss ? 1 : 1024;
if (Loss && n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(cudaMemcpy(host_output, output, n * sizeof(T), cudaMemcpyDeviceToHost));
cuda_check(cudaMemcpy(host_labels, labels, n * sizeof(T), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
result += (host_labels[i] - host_output[i]) * (host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return result;
}
if (!Loss && n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(cudaMemcpy(host_output, output, n * sizeof(T), cudaMemcpyDeviceToHost));
cuda_check(cudaMemcpy(host_labels, labels, n * sizeof(T), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
result += fabsf(host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return result;
}
const size_t maxThreads = 512;
const size_t maxBlocks = 64;
// Compute the launch configuration of the kernel
size_t numThreads = n < maxThreads * 2 ? nextPow2((n + 1) / 2) : maxThreads;
size_t numBlocks = std::min((n + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
// Allocate memory on the device
T* tmp_gpu;
cuda_check(cudaMalloc((void**)&tmp_gpu, numBlocks * sizeof(T)));
// Run the first reduction on GPU
if (Loss) {
invoke_mse_loss_kernel<T, false>(n, output, incx, labels, incy, tmp_gpu, numThreads, numBlocks);
} else {
invoke_mse_error_kernel<T, false>(n, output, incx, labels, incy, tmp_gpu, numThreads, numBlocks);
}
size_t s = numBlocks;
// Run the following reductions on GPU
while(s > cpu_threshold){
// Compute again the configuration of the reduction kernel
numThreads = s < maxThreads * 2 ? nextPow2((s + 1) / 2) : maxThreads;
numBlocks = std::min((s + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
if (Loss) {
invoke_mse_loss_kernel<T, true>(s, tmp_gpu, 1, tmp_gpu, 1, tmp_gpu, numThreads, numBlocks);
} else {
invoke_mse_error_kernel<T, true>(s, tmp_gpu, 1, tmp_gpu, 1, tmp_gpu, numThreads, numBlocks);
}
s = (s + numThreads * 2 - 1) / (numThreads * 2);
}
if(s > 1){
T* host_data = new T[s];
cuda_check(cudaMemcpy(host_data, tmp_gpu, s * sizeof(T), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < s; i++) {
result += host_data[i];
}
delete[] host_data;
} else {
cuda_check(cudaMemcpy(&result, tmp_gpu, 1 * sizeof(T), cudaMemcpyDeviceToHost));
}
cuda_check(cudaFree(tmp_gpu));
return result;
}
float egblas_mse_sloss(size_t n, float alpha, const float* output, size_t incx, const float* labels, size_t incy) {
return alpha * mse_kernel_run<true>(n, output, incx, labels, incy);
}
double egblas_mse_dloss(size_t n, double alpha, const double* output, size_t incx, const double* labels, size_t incy) {
return alpha * mse_kernel_run<true>(n, output, incx, labels, incy);
}
float egblas_mse_serror(size_t n, float alpha, const float* output, size_t incx, const float* labels, size_t incy) {
return alpha * mse_kernel_run<false>(n, output, incx, labels, incy);
}
double egblas_mse_derror(size_t n, double alpha, const double* output, size_t incx, const double* labels, size_t incy) {
return alpha * mse_kernel_run<false>(n, output, incx, labels, incy);
}
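// Illustrative sketch (not part of the original source): one possible way to call
// the exported entry points above from host code, assuming the library is built
// and linked as usual. Buffers are contiguous (inc = 1) and alpha is used here as
// a 1/n normalisation so the summed quantities become means -- that calling
// convention is an assumption, not something this file states.
#include <cstdio>
#include <cuda_runtime.h>
#include "egblas/mse.hpp"

int main() {
  const size_t n = 1024;
  float *out_h = new float[n], *lab_h = new float[n];
  for (size_t i = 0; i < n; ++i) {
    lab_h[i] = float(i) / n;
    out_h[i] = lab_h[i] + 0.1f;  // predictions off by a constant 0.1
  }
  float *out_d, *lab_d;
  cudaMalloc(&out_d, n * sizeof(float));
  cudaMalloc(&lab_d, n * sizeof(float));
  cudaMemcpy(out_d, out_h, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(lab_d, lab_h, n * sizeof(float), cudaMemcpyHostToDevice);

  float mse = egblas_mse_sloss(n, 1.0f / n, out_d, 1, lab_d, 1);
  float mae = egblas_mse_serror(n, 1.0f / n, out_d, 1, lab_d, 1);
  printf("mse ~= %f (expect ~0.01), mae ~= %f (expect ~0.1)\n", mse, mae);

  cudaFree(out_d);
  cudaFree(lab_d);
  delete[] out_h;
  delete[] lab_h;
  return 0;
}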
template <typename T>
std::pair<T, T> mse_kernel_both_run(size_t n, const T* output, size_t incx, const T* labels, size_t incy) {
T loss = 0;
T error = 0;
const size_t cpu_threshold = 1024;
if (n < cpu_threshold && incx == 1 && incy == 1) {
T* host_output = new T[n];
T* host_labels = new T[n];
cuda_check(cudaMemcpy(host_output, output, n * sizeof(T), cudaMemcpyDeviceToHost));
cuda_check(cudaMemcpy(host_labels, labels, n * sizeof(T), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < n; i++) {
loss += (host_labels[i] - host_output[i]) * (host_labels[i] - host_output[i]);
error += fabsf(host_labels[i] - host_output[i]);
}
delete[] host_output;
delete[] host_labels;
return std::make_pair(loss, error);
}
const size_t maxThreads = 512;
const size_t maxBlocks = 64;
// Compute the launch configuration of the kernel
size_t numThreads = n < maxThreads * 2 ? nextPow2((n + 1) / 2) : maxThreads;
size_t numBlocks = std::min((n + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
// Allocate memory on the device
T* tmp_gpu;
cuda_check(cudaMalloc((void**)&tmp_gpu, 2 * numBlocks * sizeof(T)));
T* tmp_loss = tmp_gpu;
T* tmp_error = tmp_gpu + numBlocks;
// Run the first reduction on GPU
invoke_mse_loss_kernel<T, false>(n, output, incx, labels, incy, tmp_loss, numThreads, numBlocks);
invoke_mse_error_kernel<T, false>(n, output, incx, labels, incy, tmp_error, numThreads, numBlocks);
size_t s = numBlocks;
// Run the following reductions on GPU
while(s > cpu_threshold){
// Compute again the configuration of the reduction kernel
numThreads = s < maxThreads * 2 ? nextPow2((s + 1) / 2) : maxThreads;
numBlocks = std::min((s + numThreads * 2 - 1) / (numThreads * 2), maxBlocks);
invoke_mse_loss_kernel<T, true>(s, tmp_loss, 1, tmp_loss, 1, tmp_loss, numThreads, numBlocks);
invoke_mse_error_kernel<T, true>(s, tmp_error, 1, tmp_error, 1, tmp_error, numThreads, numBlocks);
s = (s + numThreads * 2 - 1) / (numThreads * 2);
}
if(s > 1){
T* host_data = new T[2 * numBlocks];
cuda_check(cudaMemcpy(host_data, tmp_gpu, 2 * numBlocks * sizeof(T), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < s; i++) {
loss += host_data[i];
}
for (size_t i = 0; i < s; i++) {
error += host_data[numBlocks + i];
}
delete[] host_data;
} else {
cuda_check(cudaMemcpy(&loss, tmp_loss, 1 * sizeof(T), cudaMemcpyDeviceToHost));
cuda_check(cudaMemcpy(&error, tmp_error, 1 * sizeof(T), cudaMemcpyDeviceToHost));
}
cuda_check(cudaFree(tmp_gpu));
return std::make_pair(loss, error);
}
std::pair<float, float> egblas_smse(size_t n, float alpha, float beta, const float* output, size_t incx, const float* labels, size_t incy) {
auto res = mse_kernel_both_run(n, output, incx, labels, incy);
return std::make_pair(alpha * res.first, beta * res.second);
}
std::pair<double, double> egblas_dmse(size_t n, double alpha, double beta, const double* output, size_t incx, const double* labels, size_t incy) {
auto res = mse_kernel_both_run(n, output, incx, labels, incy);
return std::make_pair(alpha * res.first, beta * res.second);
}
|
91da1ca6dab7a30b6d88cdc8b85120e028b370f3.hip | // !!! This is a file automatically generated by hipify!!!
#define BLOCK_SIZE 64
#define _DEBUG
#include "cutil.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "cuda_memory.h"
#ifdef WIN32
#include "win32time.h"
#else
#include <sys/time.h>
#endif
#include "MirroredArray.h"
#include "hash_table.cu"
#ifdef LIBRARY
extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
#endif
void initCuda(int argc, char **argv) {
CUT_DEVICE_INIT(argc, argv);
hipDeviceProp_t prop;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&prop, 0));
printf("Device name: %s\n", prop.name);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim: %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid size: %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Shared memory per block: %d Kb\n", (int)(prop.sharedMemPerBlock/1024));
printf("Total global memory: %d Kb\n", (int)(prop.totalGlobalMem/1024));
printf("Warp size: %d\n", prop.warpSize);
printf("Memory pitch: %d\n", (int)prop.memPitch);
printf("Registers per block: %d\n", prop.regsPerBlock);
printf("Clock rate: %d\n", prop.clockRate);
printf("Texture alignment: %d\n", (int)prop.textureAlignment);
fflush(stdout);
}
struct MatrixEntry {
int index;
float weight;
};
template<int pd>
__global__ static void createMatrix(const int w, const int h,
const float *positions,
const float *values,
const float *scaleFactor,
MatrixEntry *matrix) {
// scanline order
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
float myElevated[pd+1];
const float *myPosition = positions + idx*pd;
int myGreedy[pd+1];
int myRank[pd+1];
float myBarycentric[pd+2];
__shared__ short keys[pd*BLOCK_SIZE];
short *myKey = keys + threadId * pd;
if (!outOfBounds) {
myElevated[pd] = -pd*(myPosition[pd-1])*scaleFactor[pd-1];
for (int i = pd-1; i > 0; i--) {
myElevated[i] = (myElevated[i+1] -
i*(myPosition[i-1])*scaleFactor[i-1] +
(i+2)*(myPosition[i])*scaleFactor[i]);
}
myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
float v = myElevated[i]*(1.0f/(pd+1));
float up = ceilf(v) * (pd+1);
float down = floorf(v) * (pd+1);
if (up - myElevated[i] < myElevated[i] - down) {
myGreedy[i] = (signed short)up;
} else {
myGreedy[i] = (signed short)down;
}
sum += myGreedy[i];
}
sum /= pd+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] ||
(myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j]
&& i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= pd+1;
myRank[i] += sum - (pd+1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += pd+1;
myRank[i] += (pd+1) + sum;
} else {
myRank[i] += sum;
}
}
}
#ifdef LINEAR_D_MEMORY
for (int i = 0; i <= pd; i++) {
table_zeros[idx*(pd+1)+i] = myGreedy[i];
table_rank[idx*(pd+1)+i] = myRank[i];
}
#endif
// turn delta into barycentric coords
for (int i = 0; i <= pd+1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
float delta = (myElevated[i] - myGreedy[i]) * (1.0f/(pd+1));
myBarycentric[pd-myRank[i]] += delta;
myBarycentric[pd+1-myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd+1];
}
#ifdef USE_ADDITIVE_HASH
unsigned int cumulative_hash = hash<pd>(myGreedy);
#endif
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd-color) myKey[i] -= (pd+1);
}
}
#ifdef USE_ADDITIVE_HASH
for (int i = 0; i < pd; i++) {
if (myRank[i] == pd-color) cumulative_hash += hOffset[i];
}
#endif
if (!outOfBounds) {
MatrixEntry r;
#ifdef USE_ADDITIVE_HASH
r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx*(pd+1)+color);
#else
r.index = hashTableInsert<pd>(myKey, idx*(pd+1)+color);
#endif
r.weight = myBarycentric[color];
matrix[idx*(pd+1) + color] = r;
}
}
}
template<int kd>
__global__ static void cleanHashTable(int n, MatrixEntry *matrix) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// find my hash table entry
int *e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
        // earlier entry, so it's not a problem as long as we rehash now.
#ifdef LINEAR_D_MEMORY
// Get my key
short myKey[kd];
generateKey<kd>(*e, myKey);
*e = hashTableRetrieve<kd>(myKey);
#else
*e = hashTableRetrieve<kd>(table_keys + *e*kd);
#endif
}
}
template<int pd, int vd>
__global__ static void splat(const int w, const int h, float *values, MatrixEntry *matrix) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
//const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
float *myValue = values + idx*vd;
MatrixEntry r = matrix[idx*(pd+1)+color];
matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
atomicAdd(val+j, myValue[j]*r.weight);
}
atomicAdd(val+vd, r.weight);
}
template<int pd, int vd>
__global__ static void splatCache(const int w, const int h, float *values, MatrixEntry *matrix) {
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
__shared__ int sharedOffsets[BLOCK_SIZE];
__shared__ float sharedValues[BLOCK_SIZE*(vd+1)];
int myOffset = -1;
float *myValue = sharedValues + threadId*(vd+1);
if (!outOfBounds) {
float *value = values + idx*vd;
MatrixEntry r = matrix[idx*(pd+1)+color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] = value[j]*r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds) return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId*(vd+1) + j] += sharedValues[i*(vd+1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
float *val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
atomicAdd(val+j, myValue[j]);
}
}
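// Illustrative sketch (not part of the original source): the deduplication in
// splatCache above lets, for each lattice offset that appears in a block, only
// the lowest-numbered thread write to global memory, after it has folded in the
// contributions of every higher-numbered thread that hit the same offset. The
// host-only analogue below mirrors that loop; the offsets and weights are
// made-up values.
#include <cstdio>

int main() {
  const int block_size = 8;
  const int offset[block_size] = {5, 3, 5, 7, 3, 5, 9, 7};  // per-"thread" targets
  const float weight[block_size] = {1, 1, 1, 1, 1, 1, 1, 1};

  for (int t = 0; t < block_size; ++t) {
    bool first_owner = true;
    float acc = weight[t];
    for (int i = 0; i < block_size; ++i) {
      if (i < t && offset[i] == offset[t]) { first_owner = false; break; }  // defer to lower thread
      if (i > t && offset[i] == offset[t]) acc += weight[i];                // absorb higher thread
    }
    if (first_owner)
      printf("thread %d writes offset %d with accumulated weight %.0f\n",
             t, offset[t], acc);
  }
  return 0;
}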
template<int pd, int vd>
__global__ static void blur(int n, float *newValues, MatrixEntry *matrix, int color) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
short myKey[pd+1];
short np[pd+1];
short nm[pd+1];
#ifdef LINEAR_D_MEMORY
generateKey<pd>(idx, myKey);
for (int i = 0; i < pd; i++) {
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
#else
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx*pd+i];
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
#endif
np[color] -= pd+1;
nm[color] += pd+1;
#ifdef USE_ADDITIVE_HASH
unsigned int hCurrent = hash<pd>(myKey);
int offNp = hashTableRetrieveWithHash<pd>(hCurrent+hOffset[color],np);
int offNm = hashTableRetrieveWithHash<pd>(hCurrent-hOffset[color],nm);
#else
int offNp = hashTableRetrieve<pd>(np);
int offNm = hashTableRetrieve<pd>(nm);
#endif
float *valMe = table_values + (vd+1)*idx;
float *valNp = table_values + (vd+1)*offNp;
float *valNm = table_values + (vd+1)*offNm;
float *valOut = newValues + (vd+1)*idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/4;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2))/4;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i]*2))/4;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i]*2;
}
}
}
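// Illustrative sketch (not part of the original source): away from the lattice
// boundary, blur() above applies a [1 2 1]/4 stencil along one lattice direction
// per call (one call per "color"). Repeating a binomial stencil like this is what
// produces the overall Gaussian-like smoothing. The host-only demo below applies
// the same stencil to a 1-D impulse a few times to show the binomial spread;
// boundary handling differs from the kernel and is not the point here.
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> v(9, 0.0);
  v[4] = 1.0;  // unit impulse in the middle
  for (int pass = 0; pass < 3; ++pass) {
    std::vector<double> next(v.size(), 0.0);
    for (size_t i = 1; i + 1 < v.size(); ++i)
      next[i] = (v[i - 1] + 2.0 * v[i] + v[i + 1]) / 4.0;  // same [1 2 1]/4 stencil
    v = next;
    printf("after pass %d:", pass + 1);
    for (double x : v) printf(" %.4f", x);
    printf("\n");
  }
  return 0;
}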
template<int pd, int vd>
__global__ static void slice(const int w, const int h, float *values, MatrixEntry *matrix) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
__shared__ float localValue[BLOCK_SIZE*vd];
float *myValue = localValue + threadId*vd;
float myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry r = matrix[idx*(pd+1) + i];
float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight*val[j];
}
myWeight += r.weight*val[vd];
}
myWeight = 1.0f/myWeight;
for (int j = 0; j < vd; j++)
values[idx*vd + j] = myValue[j]*myWeight;
}
template<int vd, int pd>
void filter_(float *im, float *ref, int w, int h, bool accurate) {
int n = w*h;
float blurVariance = accurate ? 0.5 : 0;
MirroredArray<float> scaleFactor(pd);
//MirroredArray<float> offset(pd);
for (int i = 0; i < pd; i++) {
scaleFactor.host[i] = (pd+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2)));
//offset.host[i] = ((double)rand()/RAND_MAX)*(pd+1)*2;
}
scaleFactor.hostToDevice();
//offset.hostToDevice();
MirroredArray<float> values(im, n*vd);
MirroredArray<float> positions(ref, n*pd);
MirroredArray<MatrixEntry> matrix(n*(pd+1));
createHashTable<pd, vd+1>(n*(pd+1));
// Populate constant memory for hash helpers
unsigned long long int __host_two32 = ((unsigned long long int)1)<<32;
unsigned int __host_div_c = 2*(n*(pd+1));
unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f));
unsigned int __host_div_m = (__host_two32<<__host_div_l)/__host_div_c - __host_two32 + 1;
CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_c, &__host_div_c, sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_l, &__host_div_l, sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_m, &__host_div_m, sizeof(unsigned int)));
// Populate constant memory with hash of offset vectors
unsigned int hOffset_host[pd+1];
signed short offset[pd+1];
for (int i = 0; i < pd; offset[i] = 1, i++);
for (int i = 0; i <= pd; i++) {
offset[i] -= pd+1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd+1;
}
CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&hOffset, &hOffset_host, sizeof(unsigned int)*(pd+1)));
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
timeval t[7];
gettimeofday(t+0, NULL);
hipLaunchKernelGGL(( createMatrix<pd>), dim3(blocks), dim3(blockSize), 0, 0, w, h, positions.device,
values.device,
scaleFactor.device,
matrix.device);
CUT_CHECK_ERROR("Matrix creation failed\n");
gettimeofday(t+1, NULL);
//HashTable hostTable;
//int hashTableEntries;
//CUDA_SAFE_CALL(hipMemcpy(&hostTable, table, sizeof(HashTable), hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(&hashTableEntries, hostTable_filled, sizeof(int), hipMemcpyDeviceToHost));
//printf("Hash table has %d entries\n", hashTableEntries);
// fix duplicate hash table entries
int cleanBlockSize = 32;
dim3 cleanBlocks((n-1)/cleanBlockSize+1, 2*(pd+1), 1);
hipLaunchKernelGGL(( cleanHashTable<pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, 2*n*(pd+1), matrix.device);
CUT_CHECK_ERROR("clean failed\n");
gettimeofday(t+2, NULL);
// splat splits by color, so extend the y coordinate to our blocks to represent that
blocks.y *= pd+1;
hipLaunchKernelGGL(( splatCache<pd, vd>), dim3(blocks), dim3(blockSize), 0, 0, w, h, values.device, matrix.device);
//splat<pd, vd><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
CUT_CHECK_ERROR("splat failed\n");
gettimeofday(t+3, NULL);
if (accurate) {
float *newValues;
allocateCudaMemory((void**)&(newValues), n*(pd+1)*(vd+1)*sizeof(float));
CUDA_SAFE_CALL(hipMemset((void *)newValues, 0, n*(pd+1)*(vd+1)*sizeof(float)));
for (int color = 0; color <= pd; color++) {
hipLaunchKernelGGL(( blur<pd, vd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, n*(pd+1), newValues, matrix.device, color);
CUT_CHECK_ERROR("blur failed\n");
newValues = swapHashTableValues(newValues);
}
}
gettimeofday(t+4, NULL);
blocks.y /= (pd+1);
hipLaunchKernelGGL(( slice<pd, vd>), dim3(blocks), dim3(blockSize), 0, 0, w, h, values.device, matrix.device);
CUT_CHECK_ERROR("slice failed\n");
gettimeofday(t+5, NULL);
double total = (t[5].tv_sec - t[0].tv_sec)*1000.0 + (t[5].tv_usec - t[0].tv_usec)/1000.0;
printf("Total time: %3.3f ms\n", total);
    const char *names[5] = {"Create",
"Clean ",
"Splat ",
"Blur ",
"Slice "};
for (int i = 1; i < 6; i++) {
printf("%s: %3.3f ms\n", names[i-1], (t[i].tv_sec - t[i-1].tv_sec)*1000.0 + (t[i].tv_usec - t[i-1].tv_usec)/1000.0);
}
printf("Total GPU memory usage: %u bytes\n", (unsigned int)GPU_MEMORY_ALLOCATION);
values.deviceToHost();
destroyHashTable();
}
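// Illustrative sketch (not part of the original source): filter_() above uploads
// __div_c, __div_l and __div_m so that hash_table.cu (not shown) can reduce hash
// values modulo the table capacity without a hardware divide -- presumably via
// __umulhi. The host-only check below only verifies the arithmetic identity
// behind those constants: with l = ceil(log2 c) and m = floor(2^(32+l)/c) - 2^32 + 1,
// ((m*x >> 32) + x) >> l equals x / c for 32-bit x. The capacity is a made-up
// example, and the check runs in 64-bit arithmetic, sidestepping the 32-bit
// overflow a device implementation has to handle.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Example capacity: a 1-megapixel image with pd = 5, i.e. 2 * n * (pd + 1).
  const uint32_t c = 2u * (1024u * 1024u) * 6u;
  const uint32_t l = (uint32_t)ceil(log2((double)c));
  const uint64_t two32 = 1ull << 32;
  const uint64_t m = ((two32 << l) / c) - two32 + 1;  // assumes l < 32 so the shift fits

  for (uint64_t x = 0; x < two32; x += 49999999ull) {
    uint64_t t = (m * x) >> 32;  // high half of the 32x32-bit multiply
    uint64_t q = (t + x) >> l;   // quotient via shift-and-add
    if (q != x / c) { printf("mismatch at x = %llu\n", (unsigned long long)x); return 1; }
  }
  printf("constants reproduce x / %u for all probes (l = %u, m = %llu)\n",
         c, l, (unsigned long long)m);
  return 0;
}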
#ifdef LIBRARY
extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
#endif
void filter(float *im, float *ref, int pd, int vd, int w, int h, bool accurate) {
switch (vd*1000 + pd) {
case 1001: filter_<1, 1>(im, ref, w, h, accurate); break;
case 2001: filter_<2, 1>(im, ref, w, h, accurate); break;
case 3001: filter_<3, 1>(im, ref, w, h, accurate); break;
case 1002: filter_<1, 2>(im, ref, w, h, accurate); break;
case 2002: filter_<2, 2>(im, ref, w, h, accurate); break;
case 3002: filter_<3, 2>(im, ref, w, h, accurate); break;
case 1003: filter_<1, 3>(im, ref, w, h, accurate); break;
case 2003: filter_<2, 3>(im, ref, w, h, accurate); break;
case 3003: filter_<3, 3>(im, ref, w, h, accurate); break;
case 1004: filter_<1, 4>(im, ref, w, h, accurate); break;
case 2004: filter_<2, 4>(im, ref, w, h, accurate); break;
case 3004: filter_<3, 4>(im, ref, w, h, accurate); break;
case 1005: filter_<1, 5>(im, ref, w, h, accurate); break;
case 2005: filter_<2, 5>(im, ref, w, h, accurate); break;
case 3005: filter_<3, 5>(im, ref, w, h, accurate); break;
case 1006: filter_<1, 6>(im, ref, w, h, accurate); break;
case 2006: filter_<2, 6>(im, ref, w, h, accurate); break;
case 3006: filter_<3, 6>(im, ref, w, h, accurate); break;
case 1007: filter_<1, 7>(im, ref, w, h, accurate); break;
case 2007: filter_<2, 7>(im, ref, w, h, accurate); break;
case 3007: filter_<3, 7>(im, ref, w, h, accurate); break;
case 1008: filter_<1, 8>(im, ref, w, h, accurate); break;
case 2008: filter_<2, 8>(im, ref, w, h, accurate); break;
case 3008: filter_<3, 8>(im, ref, w, h, accurate); break;
case 1009: filter_<1, 9>(im, ref, w, h, accurate); break;
case 2009: filter_<2, 9>(im, ref, w, h, accurate); break;
case 3009: filter_<3, 9>(im, ref, w, h, accurate); break;
case 1010: filter_<1, 10>(im, ref, w, h, accurate); break;
case 2010: filter_<2, 10>(im, ref, w, h, accurate); break;
case 3010: filter_<3, 10>(im, ref, w, h, accurate); break;
case 1011: filter_<1, 11>(im, ref, w, h, accurate); break;
case 2011: filter_<2, 11>(im, ref, w, h, accurate); break;
case 3011: filter_<3, 11>(im, ref, w, h, accurate); break;
case 1012: filter_<1, 12>(im, ref, w, h, accurate); break;
case 2012: filter_<2, 12>(im, ref, w, h, accurate); break;
case 3012: filter_<3, 12>(im, ref, w, h, accurate); break;
case 1013: filter_<1, 13>(im, ref, w, h, accurate); break;
case 2013: filter_<2, 13>(im, ref, w, h, accurate); break;
case 3013: filter_<3, 13>(im, ref, w, h, accurate); break;
case 1014: filter_<1, 14>(im, ref, w, h, accurate); break;
case 2014: filter_<2, 14>(im, ref, w, h, accurate); break;
case 3014: filter_<3, 14>(im, ref, w, h, accurate); break;
case 1015: filter_<1, 15>(im, ref, w, h, accurate); break;
case 2015: filter_<2, 15>(im, ref, w, h, accurate); break;
case 3015: filter_<3, 15>(im, ref, w, h, accurate); break;
case 1016: filter_<1, 16>(im, ref, w, h, accurate); break;
case 2016: filter_<2, 16>(im, ref, w, h, accurate); break;
case 3016: filter_<3, 16>(im, ref, w, h, accurate); break;
default:
printf("Unsupported channel counts. Reference image must have 1 to 16 channels, input image must have 1 to 3 channels\n");
}
}
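// Illustrative sketch (not part of the original source): a minimal in-memory call
// of filter() above, as an alternative to the .tmp-file driver below. It assumes
// the usual permutohedral convention that the reference features are positions
// (and, if desired, colours) divided by the corresponding filter standard
// deviations -- this file does not state that convention itself -- and that this
// translation unit is built with LIBRARY defined and linked in.
#include <cstdlib>
#include <vector>

void initCuda(int argc, char **argv);
void filter(float *im, float *ref, int pd, int vd, int w, int h, bool accurate);

int main(int argc, char **argv) {
  initCuda(argc, argv);
  const int w = 256, h = 256;
  const float spatial_sigma = 8.0f;

  std::vector<float> im(w * h);       // vd = 1: a noisy grayscale image
  std::vector<float> ref(w * h * 2);  // pd = 2: (x, y) positions
  for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x) {
      im[y * w + x] = rand() / (float)RAND_MAX;
      ref[(y * w + x) * 2 + 0] = x / spatial_sigma;  // assumed position/sigma scaling
      ref[(y * w + x) * 2 + 1] = y / spatial_sigma;
    }

  // pd = 2, vd = 1: approximately a spatial Gaussian blur of the noise image.
  filter(im.data(), ref.data(), /*pd=*/2, /*vd=*/1, w, h, /*accurate=*/true);
  return 0;
}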
// Below here is a program for testing it on the command line
#ifndef LIBRARY
struct header {
int frames, width, height, channels, type;
};
void loadTMP(const char *filename, float **data, header *h) {
FILE *f = fopen(filename, "rb");
fread(h, sizeof(header), 1, f);
size_t size = h->frames*h->width*h->channels*h->height;
*data = new float[size];
fread(*data, sizeof(float), size, f);
fclose(f);
}
void saveTMP(const char *filename, float *data, header h) {
FILE *f = fopen(filename, "wb");
fwrite(&h, sizeof(header), 1, f);
size_t size = h.frames*h.width*h.channels*h.height;
fwrite(data, sizeof(float), size, f);
fclose(f);
}
int main(int argc, char **argv) {
initCuda(1, argv);
if (argc < 4) {
printf("Usage: permutohedral input.tmp ref.tmp output.tmp {accurate}\n");
return 1;
}
bool accurate = argc == 5;
srand(time(NULL));
float *im, *ref;
header imHeader, refHeader;
loadTMP(argv[1], &im, &imHeader);
loadTMP(argv[2], &ref, &refHeader);
filter(im, ref, refHeader.channels, imHeader.channels, imHeader.width, imHeader.height, accurate);
saveTMP(argv[3], im, imHeader);
return 0;
}
#endif
| 91da1ca6dab7a30b6d88cdc8b85120e028b370f3.cu |
#define BLOCK_SIZE 64
#define _DEBUG
#include "cutil.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include "cuda_memory.h"
#ifdef WIN32
#include "win32time.h"
#else
#include <sys/time.h>
#endif
#include "MirroredArray.h"
#include "hash_table.cu"
#ifdef LIBRARY
extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
#endif
void initCuda(int argc, char **argv) {
CUT_DEVICE_INIT(argc, argv);
cudaDeviceProp prop;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&prop, 0));
printf("Device name: %s\n", prop.name);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim: %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid size: %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Shared memory per block: %d Kb\n", (int)(prop.sharedMemPerBlock/1024));
printf("Total global memory: %d Kb\n", (int)(prop.totalGlobalMem/1024));
printf("Warp size: %d\n", prop.warpSize);
printf("Memory pitch: %d\n", (int)prop.memPitch);
printf("Registers per block: %d\n", prop.regsPerBlock);
printf("Clock rate: %d\n", prop.clockRate);
printf("Texture alignment: %d\n", (int)prop.textureAlignment);
fflush(stdout);
}
struct MatrixEntry {
int index;
float weight;
};
template<int pd>
__global__ static void createMatrix(const int w, const int h,
const float *positions,
const float *values,
const float *scaleFactor,
MatrixEntry *matrix) {
// scanline order
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
float myElevated[pd+1];
const float *myPosition = positions + idx*pd;
int myGreedy[pd+1];
int myRank[pd+1];
float myBarycentric[pd+2];
__shared__ short keys[pd*BLOCK_SIZE];
short *myKey = keys + threadId * pd;
if (!outOfBounds) {
myElevated[pd] = -pd*(myPosition[pd-1])*scaleFactor[pd-1];
for (int i = pd-1; i > 0; i--) {
myElevated[i] = (myElevated[i+1] -
i*(myPosition[i-1])*scaleFactor[i-1] +
(i+2)*(myPosition[i])*scaleFactor[i]);
}
myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
float v = myElevated[i]*(1.0f/(pd+1));
float up = ceilf(v) * (pd+1);
float down = floorf(v) * (pd+1);
if (up - myElevated[i] < myElevated[i] - down) {
myGreedy[i] = (signed short)up;
} else {
myGreedy[i] = (signed short)down;
}
sum += myGreedy[i];
}
sum /= pd+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] ||
(myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j]
&& i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= pd+1;
myRank[i] += sum - (pd+1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += pd+1;
myRank[i] += (pd+1) + sum;
} else {
myRank[i] += sum;
}
}
}
#ifdef LINEAR_D_MEMORY
for (int i = 0; i <= pd; i++) {
table_zeros[idx*(pd+1)+i] = myGreedy[i];
table_rank[idx*(pd+1)+i] = myRank[i];
}
#endif
// turn delta into barycentric coords
for (int i = 0; i <= pd+1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
float delta = (myElevated[i] - myGreedy[i]) * (1.0f/(pd+1));
myBarycentric[pd-myRank[i]] += delta;
myBarycentric[pd+1-myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd+1];
}
#ifdef USE_ADDITIVE_HASH
unsigned int cumulative_hash = hash<pd>(myGreedy);
#endif
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd-color) myKey[i] -= (pd+1);
}
}
#ifdef USE_ADDITIVE_HASH
for (int i = 0; i < pd; i++) {
if (myRank[i] == pd-color) cumulative_hash += hOffset[i];
}
#endif
if (!outOfBounds) {
MatrixEntry r;
#ifdef USE_ADDITIVE_HASH
r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx*(pd+1)+color);
#else
r.index = hashTableInsert<pd>(myKey, idx*(pd+1)+color);
#endif
r.weight = myBarycentric[color];
matrix[idx*(pd+1) + color] = r;
}
}
}
template<int kd>
__global__ static void cleanHashTable(int n, MatrixEntry *matrix) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// find my hash table entry
int *e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
// earlier entry, so it's no problem as long as we rehash now.
#ifdef LINEAR_D_MEMORY
// Get my key
short myKey[kd];
generateKey<kd>(*e, myKey);
*e = hashTableRetrieve<kd>(myKey);
#else
*e = hashTableRetrieve<kd>(table_keys + *e*kd);
#endif
}
}
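// splat: one thread per (pixel, simplex vertex) pair; the vertex index (color) is
// folded into blockIdx.y. Each thread scatters its pixel's value vector, scaled by
// the barycentric weight, into the vertex's slot of table_values with atomicAdd,
// and accumulates the weight itself into the extra homogeneous coordinate at vd.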
template<int pd, int vd>
__global__ static void splat(const int w, const int h, float *values, MatrixEntry *matrix) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
//const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
float *myValue = values + idx*vd;
MatrixEntry r = matrix[idx*(pd+1)+color];
matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
atomicAdd(val+j, myValue[j]*r.weight);
}
atomicAdd(val+vd, r.weight);
}
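// splatCache: same operation as splat, but contributions from threads in the same
// block that target the same lattice point are first merged in shared memory (the
// lowest-numbered thread owning a key accumulates the others), so only one set of
// atomicAdds per key per block reaches global memory.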
template<int pd, int vd>
__global__ static void splatCache(const int w, const int h, float *values, MatrixEntry *matrix) {
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
__shared__ int sharedOffsets[BLOCK_SIZE];
__shared__ float sharedValues[BLOCK_SIZE*(vd+1)];
int myOffset = -1;
float *myValue = sharedValues + threadId*(vd+1);
if (!outOfBounds) {
float *value = values + idx*vd;
MatrixEntry r = matrix[idx*(pd+1)+color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] = value[j]*r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds) return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId*(vd+1) + j] += sharedValues[i*(vd+1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
float *val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
atomicAdd(val+j, myValue[j]);
}
}
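// blur: one thread per lattice point. For the given lattice direction (`color`) it
// looks up the two neighbours at +/-1 along that axis via the hash table and applies
// a [1 2 1]/4 stencil to the stored values, writing the result into newValues;
// neighbours that are absent from the table are handled by the reduced-stencil cases.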
template<int pd, int vd>
__global__ static void blur(int n, float *newValues, MatrixEntry *matrix, int color) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
short myKey[pd+1];
short np[pd+1];
short nm[pd+1];
#ifdef LINEAR_D_MEMORY
generateKey<pd>(idx, myKey);
for (int i = 0; i < pd; i++) {
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
#else
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx*pd+i];
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
#endif
np[color] -= pd+1;
nm[color] += pd+1;
#ifdef USE_ADDITIVE_HASH
unsigned int hCurrent = hash<pd>(myKey);
int offNp = hashTableRetrieveWithHash<pd>(hCurrent+hOffset[color],np);
int offNm = hashTableRetrieveWithHash<pd>(hCurrent-hOffset[color],nm);
#else
int offNp = hashTableRetrieve<pd>(np);
int offNm = hashTableRetrieve<pd>(nm);
#endif
float *valMe = table_values + (vd+1)*idx;
float *valNp = table_values + (vd+1)*offNp;
float *valNm = table_values + (vd+1)*offNm;
float *valOut = newValues + (vd+1)*idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/4;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2))/4;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i]*2))/4;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i]*2;
}
}
}
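// slice: one thread per pixel. Gathers the (blurred) values of the pd+1 enclosing
// lattice points using the barycentric weights recorded by createMatrix, then
// divides by the accumulated homogeneous weight to normalise before writing the
// filtered value back to the image.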
template<int pd, int vd>
__global__ static void slice(const int w, const int h, float *values, MatrixEntry *matrix) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
__shared__ float localValue[BLOCK_SIZE*vd];
float *myValue = localValue + threadId*vd;
float myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry r = matrix[idx*(pd+1) + i];
float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight*val[j];
}
myWeight += r.weight*val[vd];
}
myWeight = 1.0f/myWeight;
for (int j = 0; j < vd; j++)
values[idx*vd + j] = myValue[j]*myWeight;
}
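// filter_: host-side driver for one (vd, pd) instantiation. It computes the scale
// factors, mirrors the data onto the device, builds the lattice (createMatrix +
// cleanHashTable), splats the values, optionally runs the per-axis blur passes when
// `accurate` is set, slices the result back out, and prints per-stage timings.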
template<int vd, int pd>
void filter_(float *im, float *ref, int w, int h, bool accurate) {
int n = w*h;
float blurVariance = accurate ? 0.5 : 0;
MirroredArray<float> scaleFactor(pd);
//MirroredArray<float> offset(pd);
for (int i = 0; i < pd; i++) {
scaleFactor.host[i] = (pd+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2)));
//offset.host[i] = ((double)rand()/RAND_MAX)*(pd+1)*2;
}
scaleFactor.hostToDevice();
//offset.hostToDevice();
MirroredArray<float> values(im, n*vd);
MirroredArray<float> positions(ref, n*pd);
MirroredArray<MatrixEntry> matrix(n*(pd+1));
createHashTable<pd, vd+1>(n*(pd+1));
// Populate constant memory for hash helpers
unsigned long long int __host_two32 = ((unsigned long long int)1)<<32;
unsigned int __host_div_c = 2*(n*(pd+1));
unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f));
unsigned int __host_div_m = (__host_two32<<__host_div_l)/__host_div_c - __host_two32 + 1;
CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_c, &__host_div_c, sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_l, &__host_div_l, sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_m, &__host_div_m, sizeof(unsigned int)));
// Populate constant memory with hash of offset vectors
unsigned int hOffset_host[pd+1];
signed short offset[pd+1];
for (int i = 0; i < pd; offset[i] = 1, i++);
for (int i = 0; i <= pd; i++) {
offset[i] -= pd+1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd+1;
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&hOffset, &hOffset_host, sizeof(unsigned int)*(pd+1)));
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
timeval t[7];
gettimeofday(t+0, NULL);
createMatrix<pd><<<blocks, blockSize>>>(w, h, positions.device,
values.device,
scaleFactor.device,
matrix.device);
CUT_CHECK_ERROR("Matrix creation failed\n");
gettimeofday(t+1, NULL);
//HashTable hostTable;
//int hashTableEntries;
//CUDA_SAFE_CALL(cudaMemcpy(&hostTable, table, sizeof(HashTable), cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(&hashTableEntries, hostTable_filled, sizeof(int), cudaMemcpyDeviceToHost));
//printf("Hash table has %d entries\n", hashTableEntries);
// fix duplicate hash table entries
int cleanBlockSize = 32;
dim3 cleanBlocks((n-1)/cleanBlockSize+1, 2*(pd+1), 1);
cleanHashTable<pd><<<cleanBlocks, cleanBlockSize>>>(2*n*(pd+1), matrix.device);
CUT_CHECK_ERROR("clean failed\n");
gettimeofday(t+2, NULL);
// splat splits by color, so extend the y coordinate to our blocks to represent that
blocks.y *= pd+1;
splatCache<pd, vd><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
//splat<pd, vd><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
CUT_CHECK_ERROR("splat failed\n");
gettimeofday(t+3, NULL);
if (accurate) {
float *newValues;
allocateCudaMemory((void**)&(newValues), n*(pd+1)*(vd+1)*sizeof(float));
CUDA_SAFE_CALL(cudaMemset((void *)newValues, 0, n*(pd+1)*(vd+1)*sizeof(float)));
for (int color = 0; color <= pd; color++) {
blur<pd, vd><<<cleanBlocks, cleanBlockSize>>>(n*(pd+1), newValues, matrix.device, color);
CUT_CHECK_ERROR("blur failed\n");
newValues = swapHashTableValues(newValues);
}
}
gettimeofday(t+4, NULL);
blocks.y /= (pd+1);
slice<pd, vd><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
CUT_CHECK_ERROR("slice failed\n");
gettimeofday(t+5, NULL);
double total = (t[5].tv_sec - t[0].tv_sec)*1000.0 + (t[5].tv_usec - t[0].tv_usec)/1000.0;
printf("Total time: %3.3f ms\n", total);
char *names[5] = {"Create",
"Clean ",
"Splat ",
"Blur ",
"Slice "};
for (int i = 1; i < 6; i++) {
printf("%s: %3.3f ms\n", names[i-1], (t[i].tv_sec - t[i-1].tv_sec)*1000.0 + (t[i].tv_usec - t[i-1].tv_usec)/1000.0);
}
printf("Total GPU memory usage: %u bytes\n", (unsigned int)GPU_MEMORY_ALLOCATION);
values.deviceToHost();
destroyHashTable();
}
#ifdef LIBRARY
extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
#endif
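// filter: runtime dispatcher. Encodes the channel counts as vd*1000 + pd to select
// one of the explicitly instantiated filter_<vd, pd> templates (vd = 1..3 image
// channels, pd = 1..16 reference channels).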
void filter(float *im, float *ref, int pd, int vd, int w, int h, bool accurate) {
switch (vd*1000 + pd) {
case 1001: filter_<1, 1>(im, ref, w, h, accurate); break;
case 2001: filter_<2, 1>(im, ref, w, h, accurate); break;
case 3001: filter_<3, 1>(im, ref, w, h, accurate); break;
case 1002: filter_<1, 2>(im, ref, w, h, accurate); break;
case 2002: filter_<2, 2>(im, ref, w, h, accurate); break;
case 3002: filter_<3, 2>(im, ref, w, h, accurate); break;
case 1003: filter_<1, 3>(im, ref, w, h, accurate); break;
case 2003: filter_<2, 3>(im, ref, w, h, accurate); break;
case 3003: filter_<3, 3>(im, ref, w, h, accurate); break;
case 1004: filter_<1, 4>(im, ref, w, h, accurate); break;
case 2004: filter_<2, 4>(im, ref, w, h, accurate); break;
case 3004: filter_<3, 4>(im, ref, w, h, accurate); break;
case 1005: filter_<1, 5>(im, ref, w, h, accurate); break;
case 2005: filter_<2, 5>(im, ref, w, h, accurate); break;
case 3005: filter_<3, 5>(im, ref, w, h, accurate); break;
case 1006: filter_<1, 6>(im, ref, w, h, accurate); break;
case 2006: filter_<2, 6>(im, ref, w, h, accurate); break;
case 3006: filter_<3, 6>(im, ref, w, h, accurate); break;
case 1007: filter_<1, 7>(im, ref, w, h, accurate); break;
case 2007: filter_<2, 7>(im, ref, w, h, accurate); break;
case 3007: filter_<3, 7>(im, ref, w, h, accurate); break;
case 1008: filter_<1, 8>(im, ref, w, h, accurate); break;
case 2008: filter_<2, 8>(im, ref, w, h, accurate); break;
case 3008: filter_<3, 8>(im, ref, w, h, accurate); break;
case 1009: filter_<1, 9>(im, ref, w, h, accurate); break;
case 2009: filter_<2, 9>(im, ref, w, h, accurate); break;
case 3009: filter_<3, 9>(im, ref, w, h, accurate); break;
case 1010: filter_<1, 10>(im, ref, w, h, accurate); break;
case 2010: filter_<2, 10>(im, ref, w, h, accurate); break;
case 3010: filter_<3, 10>(im, ref, w, h, accurate); break;
case 1011: filter_<1, 11>(im, ref, w, h, accurate); break;
case 2011: filter_<2, 11>(im, ref, w, h, accurate); break;
case 3011: filter_<3, 11>(im, ref, w, h, accurate); break;
case 1012: filter_<1, 12>(im, ref, w, h, accurate); break;
case 2012: filter_<2, 12>(im, ref, w, h, accurate); break;
case 3012: filter_<3, 12>(im, ref, w, h, accurate); break;
case 1013: filter_<1, 13>(im, ref, w, h, accurate); break;
case 2013: filter_<2, 13>(im, ref, w, h, accurate); break;
case 3013: filter_<3, 13>(im, ref, w, h, accurate); break;
case 1014: filter_<1, 14>(im, ref, w, h, accurate); break;
case 2014: filter_<2, 14>(im, ref, w, h, accurate); break;
case 3014: filter_<3, 14>(im, ref, w, h, accurate); break;
case 1015: filter_<1, 15>(im, ref, w, h, accurate); break;
case 2015: filter_<2, 15>(im, ref, w, h, accurate); break;
case 3015: filter_<3, 15>(im, ref, w, h, accurate); break;
case 1016: filter_<1, 16>(im, ref, w, h, accurate); break;
case 2016: filter_<2, 16>(im, ref, w, h, accurate); break;
case 3016: filter_<3, 16>(im, ref, w, h, accurate); break;
default:
printf("Unsupported channel counts. Reference image must have 1 to 16 channels, input image must have 1 to 3 channels\n");
}
}
// Below here is a program for testing it on the command line
#ifndef LIBRARY
struct header {
int frames, width, height, channels, type;
};
void loadTMP(const char *filename, float **data, header *h) {
FILE *f = fopen(filename, "rb");
fread(h, sizeof(header), 1, f);
size_t size = h->frames*h->width*h->channels*h->height;
*data = new float[size];
fread(*data, sizeof(float), size, f);
fclose(f);
}
void saveTMP(const char *filename, float *data, header h) {
FILE *f = fopen(filename, "wb");
fwrite(&h, sizeof(header), 1, f);
size_t size = h.frames*h.width*h.channels*h.height;
fwrite(data, sizeof(float), size, f);
fclose(f);
}
int main(int argc, char **argv) {
initCuda(1, argv);
if (argc < 4) {
printf("Usage: permutohedral input.tmp ref.tmp output.tmp {accurate}\n");
return 1;
}
bool accurate = argc == 5;
srand(time(NULL));
float *im, *ref;
header imHeader, refHeader;
loadTMP(argv[1], &im, &imHeader);
loadTMP(argv[2], &ref, &refHeader);
filter(im, ref, refHeader.channels, imHeader.channels, imHeader.width, imHeader.height, accurate);
saveTMP(argv[3], im, imHeader);
return 0;
}
#endif
|
8ce5453a190400a99009c6ffffe8f39ccb2acc58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "dfa_kernel.h"
__device__
void fit(int L, const double * __restrict__ x, const double * __restrict__ y,
double *ang_coeff, double *intercept)
{
double sumx = 0.0;
double sumx2 = 0.0;
double sumxy = 0.0;
double sumy = 0.0;
double sumy2 = 0.0;
for(int i = 0; i < L; i++)
{
sumx += x[i];
sumx2 += x[i] * x[i];
sumxy += x[i] * y[i];
sumy += y[i];
sumy2 += y[i] * y[i];
}
double denom = (L * sumx2 - sumx * sumx);
if(denom == 0.0)
{
*ang_coeff = 0.0;
*intercept = 0.0;
return;
}
*ang_coeff = (L * sumxy - sumx * sumy) / (double)denom;
*intercept = (sumy * sumx2 - sumx * sumxy) / (double)denom;
}
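// DFAKernel: one thread per window. Thread tx fits a least-squares line to the
// currWinSize samples of (t, y) starting at tx*currWinSize and adds the squared
// residuals into f[tx] (the per-window detrending step of detrended fluctuation
// analysis). Note that f is accumulated into, so the caller is expected to pass a
// zero-initialised buffer.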
__global__
void DFAKernel(const double * __restrict__ y, const double * __restrict__ t,
int currWinSize, int Ns, double * __restrict__ f)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
//int stridex = blockDim.x * gridDim.x;
//int ty = blockIdx.y * blockDim.y + threadIdx.y;
//int stridey = blockDim.y * gridDim.y;
//for(int tid = tx; tid < Ns; tid += stridex)
if((tx < Ns))// && (ty < currWinSize))
{
//f[tx] = 0.0;
int startLim = tx * currWinSize;
double m = 0.0, q = 0.0;
fit(currWinSize, t + startLim, y + startLim, &m, &q);
for(int j = 0; j < currWinSize; j++)
{
double var = y[startLim + j] - (q + m * t[startLim + j]);
//double var = y[startLim + ty] - (q + m * t[startLim + ty]);
//f[tx * currWinSize + ty] = pow(var, 2.0);
f[tx] += pow(var, 2.0);
}
}
}
void cudaDFA(double *y, double *t, int currWinSize,
int Ns, double *f)
{
int threadsPerBlock = 512;
int blocksPerGrid = (Ns + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( DFAKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, y, t, currWinSize, Ns, f);
hipDeviceSynchronize();
}
| 8ce5453a190400a99009c6ffffe8f39ccb2acc58.cu | #include <stdio.h>
#include "dfa_kernel.h"
__device__
void fit(int L, const double * __restrict__ x, const double * __restrict__ y,
double *ang_coeff, double *intercept)
{
double sumx = 0.0;
double sumx2 = 0.0;
double sumxy = 0.0;
double sumy = 0.0;
double sumy2 = 0.0;
for(int i = 0; i < L; i++)
{
sumx += x[i];
sumx2 += x[i] * x[i];
sumxy += x[i] * y[i];
sumy += y[i];
sumy2 += y[i] * y[i];
}
double denom = (L * sumx2 - sumx * sumx);
if(denom == 0.0)
{
*ang_coeff = 0.0;
*intercept = 0.0;
return;
}
*ang_coeff = (L * sumxy - sumx * sumy) / (double)denom;
*intercept = (sumy * sumx2 - sumx * sumxy) / (double)denom;
}
__global__
void DFAKernel(const double * __restrict__ y, const double * __restrict__ t,
int currWinSize, int Ns, double * __restrict__ f)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
//int stridex = blockDim.x * gridDim.x;
//int ty = blockIdx.y * blockDim.y + threadIdx.y;
//int stridey = blockDim.y * gridDim.y;
//for(int tid = tx; tid < Ns; tid += stridex)
if((tx < Ns))// && (ty < currWinSize))
{
//f[tx] = 0.0;
int startLim = tx * currWinSize;
double m = 0.0, q = 0.0;
fit(currWinSize, t + startLim, y + startLim, &m, &q);
for(int j = 0; j < currWinSize; j++)
{
double var = y[startLim + j] - (q + m * t[startLim + j]);
//double var = y[startLim + ty] - (q + m * t[startLim + ty]);
//f[tx * currWinSize + ty] = pow(var, 2.0);
f[tx] += pow(var, 2.0);
}
}
}
void cudaDFA(double *y, double *t, int currWinSize,
int Ns, double *f)
{
int threadsPerBlock = 512;
int blocksPerGrid = (Ns + threadsPerBlock - 1) / threadsPerBlock;
DFAKernel<<<blocksPerGrid, threadsPerBlock>>>(y, t, currWinSize, Ns, f);
cudaDeviceSynchronize();
}
|
bc91e2ec0a1a20e393a49d67858e512b354944f2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 Saman Ashkiani
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include "CommandLine.h"
#include "experiments.cuh"
int main(int argc, char** argv) {
int mode = 0; // type of experiment
uint32_t num_iter = 1;
bool verbose = false;
int device_idx = 0;
uint32_t num_keys = (1 << 22);
uint32_t n_start = 20; // num_keys = 1 << n_start;
uint32_t n_end = 20;
uint32_t num_queries = num_keys;
float expected_chain = 0.6f;
float existing_ratio = 1.0f;
// mode 1 parameters:
float lf_bulk_step = 0.1f;
uint32_t lf_bulk_num_sample = 10;
// mode 3 parameters:
int num_batch = 2;
int init_batch = 1;
float insert_ratio = 0.1f;
float delete_ratio = 0.1f;
float search_exist_ratio = 0.4f;
float lf_conc_step = 0.1f;
int lf_conc_num_sample = 10;
if (cmdOptionExists(argv, argc + argv, "-mode"))
mode = atoi(getCmdOption(argv, argv + argc, "-mode"));
if (cmdOptionExists(argv, argc + argv, "-num_key"))
num_keys = atoi(getCmdOption(argv, argv + argc, "-num_key"));
if (cmdOptionExists(argv, argc + argv, "-num_query"))
num_queries = atoi(getCmdOption(argv, argv + argc, "-num_query"));
else {
num_queries = num_keys;
}
if (cmdOptionExists(argv, argc + argv, "-expected_chain"))
expected_chain = atof(getCmdOption(argv, argv + argc, "-expected_chain"));
assert(expected_chain > 0);
if (cmdOptionExists(argv, argc + argv, "-query_ratio"))
existing_ratio = atof(getCmdOption(argv, argv + argc, "-query_ratio"));
if (cmdOptionExists(argv, argc + argv, "-verbose")) {
verbose = (atoi(getCmdOption(argv, argv + argc, "-verbose")) != 0) ? true : false;
}
if (cmdOptionExists(argv, argc + argv, "-device"))
device_idx = atoi(getCmdOption(argv, argv + argc, "-device"));
if (cmdOptionExists(argv, argc + argv, "-iter")) {
num_iter = atoi(getCmdOption(argv, argv + argc, "-iter"));
}
if (cmdOptionExists(argv, argc + argv, "-nStart")) {
n_start = atoi(getCmdOption(argv, argv + argc, "-nStart"));
// for mode 0:
num_keys = (1 << n_start);
num_queries = num_keys;
}
if (cmdOptionExists(argv, argc + argv, "-nEnd")) {
n_end = atoi(getCmdOption(argv, argv + argc, "-nEnd"));
}
if (cmdOptionExists(argv, argc + argv, "-num_batch")) {
num_batch = atoi(getCmdOption(argv, argv + argc, "-num_batch"));
}
if (cmdOptionExists(argv, argc + argv, "-init_batch")) {
init_batch = atoi(getCmdOption(argv, argv + argc, "-init_batch"));
}
if (cmdOptionExists(argv, argc + argv, "-insert_ratio"))
insert_ratio = atof(getCmdOption(argv, argv + argc, "-insert_ratio"));
if (cmdOptionExists(argv, argc + argv, "-delete_ratio"))
delete_ratio = atof(getCmdOption(argv, argv + argc, "-delete_ratio"));
if (cmdOptionExists(argv, argc + argv, "-search_exist_ratio"))
search_exist_ratio =
atof(getCmdOption(argv, argv + argc, "-search_exist_ratio"));
if (cmdOptionExists(argv, argc + argv, "-lf_conc_step"))
lf_conc_step = atof(getCmdOption(argv, argv + argc, "-lf_conc_step"));
if (cmdOptionExists(argv, argc + argv, "-lf_conc_num_sample"))
lf_conc_num_sample =
atoi(getCmdOption(argv, argv + argc, "-lf_conc_num_sample"));
if (cmdOptionExists(argv, argc + argv, "-lf_bulk_step"))
lf_bulk_step = atof(getCmdOption(argv, argv + argc, "-lf_bulk_step"));
if (cmdOptionExists(argv, argc + argv, "-lf_bulk_num_sample"))
lf_bulk_num_sample =
atoi(getCmdOption(argv, argv + argc, "-lf_bulk_num_sample"));
// input argument for the file to be used for storing the results
std::string filename("");
if (cmdOptionExists(argv, argc + argv, "-filename")) {
filename.append(getCmdOption(argv, argv + argc, "-filename"));
std::cout << filename << std::endl;
} else {
// setting the filename to be the current time:
filename += "bench/";
auto time = std::time(nullptr);
auto tm = *std::localtime(&time);
std::ostringstream temp;
temp << std::put_time(&tm, "%d-%m-%Y_%H-%M-%S");
filename += ("out_" + temp.str() + ".json");
}
//=========
int devCount;
hipGetDeviceCount(&devCount);
hipDeviceProp_t devProp;
if (devCount) {
hipSetDevice(device_idx); // be changed later
hipGetDeviceProperties(&devProp, device_idx);
}
printf("Device: %s\n", devProp.name);
printf("Experiment mode = %d\n", mode);
using KeyT = uint32_t;
using ValueT = uint32_t;
// running the actual experiment
switch (mode) {
case 0: // singleton experiment
singleton_experiment<KeyT, ValueT>(num_keys, num_queries, expected_chain,
filename, device_idx, existing_ratio,
num_iter,
/*run_cudpp = */ false, verbose);
break;
case 1: // bulk build, num elements fixed, load factor changing
load_factor_bulk_experiment<KeyT, ValueT>(
num_keys, num_queries, filename, device_idx, existing_ratio, num_iter,
false, lf_bulk_num_sample, lf_bulk_step);
break;
case 2: // bulk build, load factor fixed, num elements changing
build_search_bulk_experiment<KeyT, ValueT>(
1 << n_start, 1 << n_end, filename, expected_chain, existing_ratio,
device_idx, num_iter,
/* run_cudpp = */ false,
/* verbose = */ verbose);
break;
case 3: // concurrent experiment:
concurrent_batched_op_load_factor_experiment<KeyT, ValueT>(
/*max_num_keys = */ 1 << n_end, /*batch_size = */ 1 << n_start,
num_batch, init_batch, insert_ratio, delete_ratio, search_exist_ratio,
filename, device_idx, lf_conc_step, lf_conc_num_sample, num_iter,
verbose);
break;
default:
std::cout << "Error: invalid mode." << std::endl;
break;
}
} | bc91e2ec0a1a20e393a49d67858e512b354944f2.cu | /*
* Copyright 2019 Saman Ashkiani
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include "CommandLine.h"
#include "experiments.cuh"
int main(int argc, char** argv) {
int mode = 0; // type of experiment
uint32_t num_iter = 1;
bool verbose = false;
int device_idx = 0;
uint32_t num_keys = (1 << 22);
uint32_t n_start = 20; // num_keys = 1 << n_start;
uint32_t n_end = 20;
uint32_t num_queries = num_keys;
float expected_chain = 0.6f;
float existing_ratio = 1.0f;
// mode 1 parameters:
float lf_bulk_step = 0.1f;
uint32_t lf_bulk_num_sample = 10;
// mode 3 parameters:
int num_batch = 2;
int init_batch = 1;
float insert_ratio = 0.1f;
float delete_ratio = 0.1f;
float search_exist_ratio = 0.4f;
float lf_conc_step = 0.1f;
int lf_conc_num_sample = 10;
if (cmdOptionExists(argv, argc + argv, "-mode"))
mode = atoi(getCmdOption(argv, argv + argc, "-mode"));
if (cmdOptionExists(argv, argc + argv, "-num_key"))
num_keys = atoi(getCmdOption(argv, argv + argc, "-num_key"));
if (cmdOptionExists(argv, argc + argv, "-num_query"))
num_queries = atoi(getCmdOption(argv, argv + argc, "-num_query"));
else {
num_queries = num_keys;
}
if (cmdOptionExists(argv, argc + argv, "-expected_chain"))
expected_chain = atof(getCmdOption(argv, argv + argc, "-expected_chain"));
assert(expected_chain > 0);
if (cmdOptionExists(argv, argc + argv, "-query_ratio"))
existing_ratio = atof(getCmdOption(argv, argv + argc, "-query_ratio"));
if (cmdOptionExists(argv, argc + argv, "-verbose")) {
verbose = (atoi(getCmdOption(argv, argv + argc, "-verbose")) != 0) ? true : false;
}
if (cmdOptionExists(argv, argc + argv, "-device"))
device_idx = atoi(getCmdOption(argv, argv + argc, "-device"));
if (cmdOptionExists(argv, argc + argv, "-iter")) {
num_iter = atoi(getCmdOption(argv, argv + argc, "-iter"));
}
if (cmdOptionExists(argv, argc + argv, "-nStart")) {
n_start = atoi(getCmdOption(argv, argv + argc, "-nStart"));
// for mode 0:
num_keys = (1 << n_start);
num_queries = num_keys;
}
if (cmdOptionExists(argv, argc + argv, "-nEnd")) {
n_end = atoi(getCmdOption(argv, argv + argc, "-nEnd"));
}
if (cmdOptionExists(argv, argc + argv, "-num_batch")) {
num_batch = atoi(getCmdOption(argv, argv + argc, "-num_batch"));
}
if (cmdOptionExists(argv, argc + argv, "-init_batch")) {
init_batch = atoi(getCmdOption(argv, argv + argc, "-init_batch"));
}
if (cmdOptionExists(argv, argc + argv, "-insert_ratio"))
insert_ratio = atof(getCmdOption(argv, argv + argc, "-insert_ratio"));
if (cmdOptionExists(argv, argc + argv, "-delete_ratio"))
delete_ratio = atof(getCmdOption(argv, argv + argc, "-delete_ratio"));
if (cmdOptionExists(argv, argc + argv, "-search_exist_ratio"))
search_exist_ratio =
atof(getCmdOption(argv, argv + argc, "-search_exist_ratio"));
if (cmdOptionExists(argv, argc + argv, "-lf_conc_step"))
lf_conc_step = atof(getCmdOption(argv, argv + argc, "-lf_conc_step"));
if (cmdOptionExists(argv, argc + argv, "-lf_conc_num_sample"))
lf_conc_num_sample =
atoi(getCmdOption(argv, argv + argc, "-lf_conc_num_sample"));
if (cmdOptionExists(argv, argc + argv, "-lf_bulk_step"))
lf_bulk_step = atof(getCmdOption(argv, argv + argc, "-lf_bulk_step"));
if (cmdOptionExists(argv, argc + argv, "-lf_bulk_num_sample"))
lf_bulk_num_sample =
atoi(getCmdOption(argv, argv + argc, "-lf_bulk_num_sample"));
// input argument for the file to be used for storing the results
std::string filename("");
if (cmdOptionExists(argv, argc + argv, "-filename")) {
filename.append(getCmdOption(argv, argv + argc, "-filename"));
std::cout << filename << std::endl;
} else {
// setting the filename to be the current time:
filename += "bench/";
auto time = std::time(nullptr);
auto tm = *std::localtime(&time);
std::ostringstream temp;
temp << std::put_time(&tm, "%d-%m-%Y_%H-%M-%S");
filename += ("out_" + temp.str() + ".json");
}
//=========
int devCount;
cudaGetDeviceCount(&devCount);
cudaDeviceProp devProp;
if (devCount) {
cudaSetDevice(device_idx); // be changed later
cudaGetDeviceProperties(&devProp, device_idx);
}
printf("Device: %s\n", devProp.name);
printf("Experiment mode = %d\n", mode);
using KeyT = uint32_t;
using ValueT = uint32_t;
// running the actual experiment
switch (mode) {
case 0: // singleton experiment
singleton_experiment<KeyT, ValueT>(num_keys, num_queries, expected_chain,
filename, device_idx, existing_ratio,
num_iter,
/*run_cudpp = */ false, verbose);
break;
case 1: // bulk build, num elements fixed, load factor changing
load_factor_bulk_experiment<KeyT, ValueT>(
num_keys, num_queries, filename, device_idx, existing_ratio, num_iter,
false, lf_bulk_num_sample, lf_bulk_step);
break;
case 2: // bulk build, load factor fixed, num elements changing
build_search_bulk_experiment<KeyT, ValueT>(
1 << n_start, 1 << n_end, filename, expected_chain, existing_ratio,
device_idx, num_iter,
/* run_cudpp = */ false,
/* verbose = */ verbose);
break;
case 3: // concurrent experiment:
concurrent_batched_op_load_factor_experiment<KeyT, ValueT>(
/*max_num_keys = */ 1 << n_end, /*batch_size = */ 1 << n_start,
num_batch, init_batch, insert_ratio, delete_ratio, search_exist_ratio,
filename, device_idx, lf_conc_step, lf_conc_num_sample, num_iter,
verbose);
break;
default:
std::cout << "Error: invalid mode." << std::endl;
break;
}
} |
bcdbde150bb156e551ad82437d7171a210698396.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#if defined(HAS_OPENMP)
#include <omp.h>
#endif
#include "sem.h"
#include "datalogger.h"
#include "errorhandler.h"
#define cuCheckOpen(stmt) cuCheckHelper(stmt, Close(); return -1)
#define DATALOGGER_THREAD_PRIORITY 8
dsp::DataLogger::DataLogger(std::string Name) {
ModuleName = Name;
ConstructorHelper();
}
dsp::DataLogger::DataLogger() {
ModuleName = "DataLogger";
ConstructorHelper();
}
void dsp::DataLogger::ConstructorHelper() {
AllocateInputs(1);
AllocateOutputs(0);
ConfigExpectedInput(0, "Data", DATATYPE_ANY, VALUETYPE_ANY, VECTORLENGTH_ANY);
InsertParam("Filename", (void*)&Filename, CHAR_t, FilenameCapacity, 0);
InsertParam("CSV", (void*)&csv, BOOL_t, sizeof(bool), sizeof(bool));
}
dsp::DataLogger::~DataLogger() {
delete [] expectedInputs;
delete [] inputs;
delete [] outputs;
}
int dsp::DataLogger::Open() {
if (csv) fd = fopen(Filename, "w");
else fd = fopen(Filename, "wb");
if (fd == NULL) {
std::cerr << "[" << ModuleName << "] File " << Filename
<< " cannot be opened." << std::endl;
return -1;
}
cuCheckOpen(hipHostMalloc(&buf_h, bufSize*numBufs));
for (int i=0; i<numBufs; i++)
bufEnd[i] = 0;
currUpdateBuf = 0;
currThreadBuf = 0;
return 0;
}
int dsp::DataLogger::Close() {
if (fd == NULL) {
std::clog << "[" << ModuleName << "] Nothing to close." << std::endl;
return 0;
}
fclose(fd);
fd = NULL;
cuCheck(hipHostFree(buf_h));
return 0;
}
int dsp::DataLogger::Start(void* cuFlowStream) {
pthread_attr_t attr;
struct sched_param param;
std::clog << "[" << ModuleName << "] Starting ... " << std::flush;
errCheckMod(Open());
errCheckMod(pthread_attr_init(&attr));
int minPrio = sched_get_priority_min(SCHED_RR);
int maxPrio = sched_get_priority_max(SCHED_RR);
/* POSIX's minimum requirement is 32 priority levels. Although Linux provides
99 levels, it is good to check the min/max available levels. */
param.sched_priority = (maxPrio - minPrio) *
DATALOGGER_THREAD_PRIORITY / 32 + minPrio;
errCheckMod(pthread_attr_setschedpolicy(&attr, SCHED_RR));
errCheckMod(pthread_attr_setschedparam(&attr, ¶m));
if (pthread_create(&thread, &attr, LoggerThreadEntry, this)) {
std::cerr << "[" << ModuleName << "] Error: Unable to create thread."
<< std::endl;
return -1;
}
while (KeepRunning == false);
std::clog << "Started." << std::endl;
return 0;
}
int dsp::DataLogger::Stop() {
std::clog << "[" << ModuleName << "] Stopping ... " << std::flush;
KeepRunning = false;
// signal thread to write remaining buffer and quit
errCheckMod(osindep_sem_post(&bufFullSem));
int ret = pthread_join(thread, NULL);
std::clog << "Stopped." << std::endl;
return ret;
}
void * dsp::DataLogger::LoggerThreadEntry(void * thisObj){
((dsp::DataLogger *)thisObj)->LoggerThread();
return NULL;
}
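// LoggerThread: consumer side of the double-buffering scheme. Update() fills one of
// `numBufs` pinned host buffers (copying asynchronously from the GPU when the input
// lives in device memory) and posts bufFullSem whenever the current buffer cannot
// take the next chunk; this thread waits on the semaphore and writes each completed
// buffer to disk, either formatted as CSV (per Datatype/ValueType) or as raw binary.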
void dsp::DataLogger::LoggerThread() {
if (osindep_sem_init(&bufFullSem, 0, 0)) {
std::cerr << "[" << ModuleName << "] Cannot initialise semaphore." << std::endl;
return;
}
KeepRunning = true;
while (KeepRunning) {
// timeout after 1.5s
if (osindep_sem_waitforduration(&bufFullSem, 1500000000)){
//if (osindep_sem_waitforduration(&bufFullSem, 15000000000000)){ // Debug x10000
if (errno == ETIMEDOUT)
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait timeout: buffersAvailSem" << std::endl;
else if (errno == EINTR)
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait EINTR: buffersAvailSem" << std::endl;
else
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait: buffersAvailSem" << std::endl;
KeepRunning = false;
break;
}
// Get the pointer to the buffer to write (since buffers are only segmented by the way pointers handle things)
void *ptr = (void*)((char*)buf_h + currThreadBuf*bufSize);
// Determine how many elements exist in this buffer (due to VectorLengths, the buffer may not be totally full)
size_t size = bufEnd[currThreadBuf];
if (csv) {
switch (inputs[0]->Datatype){
case FLOAT_t: size /= sizeof(float); break;
case DOUBLE_t: size /= sizeof(double); break;
case CHAR_t: size /= sizeof(char); break;
case INT_t: size /= sizeof(int); break;
case BOOL_t: size /= sizeof(bool); break;
default: continue; // other types not supported
}
// The divide-by-2 isn't needed here since bufEnd keeps track of how full the buffer is in bytes
//if (inputs[0]->ValueType == VALUE_CMPX)
// size >>= 1; //divide by 2
// Format the output based on datatype
int col = 0;
size_t idx = 0;
while (idx < size) {
switch (inputs[0]->Datatype){
case FLOAT_t: fprintf(fd, "%f", ((float*)ptr)[idx]); break;
case DOUBLE_t: fprintf(fd, "%f", ((double*)ptr)[idx]); break;
case CHAR_t: fprintf(fd, "%d", ((char*)ptr)[idx]); break;
case INT_t: fprintf(fd, "%d", ((int*)ptr)[idx]); break;
case BOOL_t: fprintf(fd, "%d", ((bool*)ptr)[idx]); break;
default: break; // other types not supported
}
if (inputs[0]->ValueType == VALUE_CMPX) {
idx++;
if (idx >= size) break;
switch (inputs[0]->Datatype){
case FLOAT_t: {
float val = ((float*)ptr)[idx];
if (val >= 0.0) fprintf(fd, "+%fj", val);
else fprintf(fd, "%fj", val);
} break;
case DOUBLE_t: {
double val = ((double*)ptr)[idx];
if (val >= 0.0) fprintf(fd, "+%fj", val);
else fprintf(fd, "%fj", val);
} break;
case CHAR_t: {
signed char val = ((char*)ptr)[idx];
if (val >= 0) fprintf(fd, "+%dj", val);
else fprintf(fd, "%dj", val);
} break;
case INT_t: {
signed int val = ((int*)ptr)[idx];
if (val >= 0) fprintf(fd, "+%dj", val);
else fprintf(fd, "%dj", val);
} break;
case BOOL_t: break; //bool should not be complex
default: break; // other types not supported
}
}
idx++;
col++;
if (col >= inputs[0]->VectorLength) {
fprintf(fd, "\n");
col = 0;
} else {
fprintf(fd, ", ");
}
}
} else {
fwrite(ptr, 1, size, fd);
}
bufEnd[currThreadBuf] = 0;
currThreadBuf++;
if (currThreadBuf >= numBufs) currThreadBuf = 0;
}
osindep_sem_destroy(&bufFullSem);
Close();
}
int dsp::DataLogger::Update(void* cuFlowStream) {
if (KeepRunning) {
// Determine how much space to allocate for each input element
size_t size;
switch (inputs[0]->Datatype){
case FLOAT_t: size = sizeof(float); break;
case DOUBLE_t: size = sizeof(double); break;
case CHAR_t: size = sizeof(char); break;
case INT_t: size = sizeof(int); break;
case BOOL_t: size = sizeof(bool); break;
default: // other types not supported
size = 0;
return 0;
}
// Multiply by the number of input elements
if (inputs[0]->ValueType == GRID) {
size *= 50625;
}
else {
size *= inputs[0]->VectorLength;
}
// If the inputs are complex values, the total datasize is twice sizeof(VALUE)
if (inputs[0]->ValueType == VALUE_CMPX) {
size <<= 1; // times 2 for complex
}
// Check if the current buffer is full, and switch to a new one if not
if ((bufEnd[currUpdateBuf] + size) >= bufSize) {
// signal thread to start writing previous update Buf
errCheckMod(osindep_sem_post(&bufFullSem));
currUpdateBuf++;
if (currUpdateBuf >= numBufs) currUpdateBuf = 0;
bufEnd[currUpdateBuf] = 0;
}
// Advance the pointer pointing to the beginning of the destination memory for copying
// Copy the data into the buffer so the Update function can return
// Note that the buffers are all one contiguous chunk of memory only segmented by the way pointers are handled
// So, dest = the beginning of the chunk of memory + advance through other buffers ahead of you
// + advance past the samples already written into this buffer
void *dest = (void*)((char*)buf_h + currUpdateBuf*bufSize + bufEnd[currUpdateBuf]);
if (inputs[0]->MemLoc == CUDA_DEVICE){
hipStream_t *cuStream = (hipStream_t*)cuFlowStream;
cuCheckM(hipMemcpyAsync(dest, inputs[0]->Data, size,
hipMemcpyDeviceToHost, *cuStream));
} else {
memcpy(dest, inputs[0]->Data, size);
}
// Advance the starting location of the buffer for the next iteration
bufEnd[currUpdateBuf] += size;
return 0;
}
else {
std::cerr << "[" << ModuleName << "] Update: Thread not running."
<< std::endl;
return -1;
}
}
| bcdbde150bb156e551ad82437d7171a210698396.cu |
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#if defined(HAS_OPENMP)
#include <omp.h>
#endif
#include "sem.h"
#include "datalogger.h"
#include "errorhandler.h"
#define cuCheckOpen(stmt) cuCheckHelper(stmt, Close(); return -1)
#define DATALOGGER_THREAD_PRIORITY 8
dsp::DataLogger::DataLogger(std::string Name) {
ModuleName = Name;
ConstructorHelper();
}
dsp::DataLogger::DataLogger() {
ModuleName = "DataLogger";
ConstructorHelper();
}
void dsp::DataLogger::ConstructorHelper() {
AllocateInputs(1);
AllocateOutputs(0);
ConfigExpectedInput(0, "Data", DATATYPE_ANY, VALUETYPE_ANY, VECTORLENGTH_ANY);
InsertParam("Filename", (void*)&Filename, CHAR_t, FilenameCapacity, 0);
InsertParam("CSV", (void*)&csv, BOOL_t, sizeof(bool), sizeof(bool));
}
dsp::DataLogger::~DataLogger() {
delete [] expectedInputs;
delete [] inputs;
delete [] outputs;
}
int dsp::DataLogger::Open() {
if (csv) fd = fopen(Filename, "w");
else fd = fopen(Filename, "wb");
if (fd == NULL) {
std::cerr << "[" << ModuleName << "] File " << Filename
<< " cannot be opened." << std::endl;
return -1;
}
cuCheckOpen(cudaMallocHost(&buf_h, bufSize*numBufs));
for (int i=0; i<numBufs; i++)
bufEnd[i] = 0;
currUpdateBuf = 0;
currThreadBuf = 0;
return 0;
}
int dsp::DataLogger::Close() {
if (fd == NULL) {
std::clog << "[" << ModuleName << "] Nothing to close." << std::endl;
return 0;
}
fclose(fd);
fd = NULL;
cuCheck(cudaFreeHost(buf_h));
return 0;
}
int dsp::DataLogger::Start(void* cuFlowStream) {
pthread_attr_t attr;
struct sched_param param;
std::clog << "[" << ModuleName << "] Starting ... " << std::flush;
errCheckMod(Open());
errCheckMod(pthread_attr_init(&attr));
int minPrio = sched_get_priority_min(SCHED_RR);
int maxPrio = sched_get_priority_max(SCHED_RR);
/* POSIX's minimum requirement is 32 priority levels. Although Linux provides
99 levels, it is good to check the min/max available levels. */
param.sched_priority = (maxPrio - minPrio) *
DATALOGGER_THREAD_PRIORITY / 32 + minPrio;
errCheckMod(pthread_attr_setschedpolicy(&attr, SCHED_RR));
errCheckMod(pthread_attr_setschedparam(&attr, ¶m));
if (pthread_create(&thread, &attr, LoggerThreadEntry, this)) {
std::cerr << "[" << ModuleName << "] Error: Unable to create thread."
<< std::endl;
return -1;
}
while (KeepRunning == false);
std::clog << "Started." << std::endl;
return 0;
}
int dsp::DataLogger::Stop() {
std::clog << "[" << ModuleName << "] Stopping ... " << std::flush;
KeepRunning = false;
// signal thread to write remaining buffer and quit
errCheckMod(osindep_sem_post(&bufFullSem));
int ret = pthread_join(thread, NULL);
std::clog << "Stopped." << std::endl;
return ret;
}
void * dsp::DataLogger::LoggerThreadEntry(void * thisObj){
((dsp::DataLogger *)thisObj)->LoggerThread();
return NULL;
}
void dsp::DataLogger::LoggerThread() {
if (osindep_sem_init(&bufFullSem, 0, 0)) {
std::cerr << "[" << ModuleName << "] Cannot initialise semaphore." << std::endl;
return;
}
KeepRunning = true;
while (KeepRunning) {
// timeout after 1.5s
if (osindep_sem_waitforduration(&bufFullSem, 1500000000)){
//if (osindep_sem_waitforduration(&bufFullSem, 15000000000000)){ // Debug x10000
if (errno == ETIMEDOUT)
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait timeout: buffersAvailSem" << std::endl;
else if (errno == EINTR)
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait EINTR: buffersAvailSem" << std::endl;
else
std::cerr << "[" << ModuleName
<< "] Error: sem_timewait: buffersAvailSem" << std::endl;
KeepRunning = false;
break;
}
// Get the pointer to the buffer to write (since buffers are only segmented by the way pointers handle things)
void *ptr = (void*)((char*)buf_h + currThreadBuf*bufSize);
// Determine how many elements exist in this buffer (due to VectorLengths, the buffer may not be totally full)
size_t size = bufEnd[currThreadBuf];
if (csv) {
switch (inputs[0]->Datatype){
case FLOAT_t: size /= sizeof(float); break;
case DOUBLE_t: size /= sizeof(double); break;
case CHAR_t: size /= sizeof(char); break;
case INT_t: size /= sizeof(int); break;
case BOOL_t: size /= sizeof(bool); break;
default: continue; // other types not supported
}
// The divide-by-2 isn't needed here since bufEnd keeps track of how full the buffer is in bytes
//if (inputs[0]->ValueType == VALUE_CMPX)
// size >>= 1; //divide by 2
// Format the output based on datatype
int col = 0;
size_t idx = 0;
while (idx < size) {
switch (inputs[0]->Datatype){
case FLOAT_t: fprintf(fd, "%f", ((float*)ptr)[idx]); break;
case DOUBLE_t: fprintf(fd, "%f", ((double*)ptr)[idx]); break;
case CHAR_t: fprintf(fd, "%d", ((char*)ptr)[idx]); break;
case INT_t: fprintf(fd, "%d", ((int*)ptr)[idx]); break;
case BOOL_t: fprintf(fd, "%d", ((bool*)ptr)[idx]); break;
default: break; // other types not supported
}
if (inputs[0]->ValueType == VALUE_CMPX) {
idx++;
if (idx >= size) break;
switch (inputs[0]->Datatype){
case FLOAT_t: {
float val = ((float*)ptr)[idx];
if (val >= 0.0) fprintf(fd, "+%fj", val);
else fprintf(fd, "%fj", val);
} break;
case DOUBLE_t: {
double val = ((double*)ptr)[idx];
if (val >= 0.0) fprintf(fd, "+%fj", val);
else fprintf(fd, "%fj", val);
} break;
case CHAR_t: {
signed char val = ((char*)ptr)[idx];
if (val >= 0) fprintf(fd, "+%dj", val);
else fprintf(fd, "%dj", val);
} break;
case INT_t: {
signed int val = ((int*)ptr)[idx];
if (val >= 0) fprintf(fd, "+%dj", val);
else fprintf(fd, "%dj", val);
} break;
case BOOL_t: break; //bool should not be complex
default: break; // other types not supported
}
}
idx++;
col++;
if (col >= inputs[0]->VectorLength) {
fprintf(fd, "\n");
col = 0;
} else {
fprintf(fd, ", ");
}
}
} else {
fwrite(ptr, 1, size, fd);
}
bufEnd[currThreadBuf] = 0;
currThreadBuf++;
if (currThreadBuf >= numBufs) currThreadBuf = 0;
}
osindep_sem_destroy(&bufFullSem);
Close();
}
int dsp::DataLogger::Update(void* cuFlowStream) {
if (KeepRunning) {
// Determine how much space to allocate for each input element
size_t size;
switch (inputs[0]->Datatype){
case FLOAT_t: size = sizeof(float); break;
case DOUBLE_t: size = sizeof(double); break;
case CHAR_t: size = sizeof(char); break;
case INT_t: size = sizeof(int); break;
case BOOL_t: size = sizeof(bool); break;
default: // other types not supported
size = 0;
return 0;
}
// Multiply by the number of input elements
if (inputs[0]->ValueType == GRID) {
size *= 50625;
}
else {
size *= inputs[0]->VectorLength;
}
// If the inputs are complex values, the total datasize is twice sizeof(VALUE)
if (inputs[0]->ValueType == VALUE_CMPX) {
size <<= 1; // times 2 for complex
}
// Check if the current buffer is full, and switch to a new one if not
if ((bufEnd[currUpdateBuf] + size) >= bufSize) {
// signal thread to start writing previous update Buf
errCheckMod(osindep_sem_post(&bufFullSem));
currUpdateBuf++;
if (currUpdateBuf >= numBufs) currUpdateBuf = 0;
bufEnd[currUpdateBuf] = 0;
}
// Advance the pointer pointing to the beginning of the destination memory for copying
// Copy the data into the buffer so the Update function can return
// Note that the buffers are all one contiguous chunk of memory only segmented by the way pointers are handled
// So, dest = the beginning of the chunk of memory + advance through other buffers ahead of you
// + advance past the samples already written into this buffer
void *dest = (void*)((char*)buf_h + currUpdateBuf*bufSize + bufEnd[currUpdateBuf]);
if (inputs[0]->MemLoc == CUDA_DEVICE){
cudaStream_t *cuStream = (cudaStream_t*)cuFlowStream;
cuCheckM(cudaMemcpyAsync(dest, inputs[0]->Data, size,
cudaMemcpyDeviceToHost, *cuStream));
} else {
memcpy(dest, inputs[0]->Data, size);
}
// Advance the starting location of the buffer for the next iteration
bufEnd[currUpdateBuf] += size;
return 0;
}
else {
std::cerr << "[" << ModuleName << "] Update: Thread not running."
<< std::endl;
return -1;
}
}
|
0021ee4b1755a3e7a4a92f7ce576e195b11f7dba.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
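// SliceKernel: one thread per output element. offsets_info packs three ints per
// dimension (slice start, output extent, input stride); the kernel stages them in
// shared memory, decodes the flat output index into per-dimension coordinates, adds
// the slice offsets, and copies the corresponding input element to the output.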
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) {
shared_data[i] = offsets_info[i];
}
__syncthreads();
if (idx < num) {
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) {
deserializeBase(serial_data, serial_length);
DeserializeValue(&serial_data, &serial_length, &starts_);
DeserializeValue(&serial_data, &serial_length, &ends_);
DeserializeValue(&serial_data, &serial_length, &axes_);
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePlugin::~SlicePlugin() {
hipStreamDestroy(copy_stream_);
hipEventDestroy(copy_event_);
hipFree(offset_temp_data_);
}
SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT {
return new SlicePlugin(starts_, ends_, axes_, with_fp16_);
}
bool SlicePlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims SlicePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT {
auto in_dims = inputs[0];
nvinfer1::Dims out_dims = in_dims;
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
out_dims.d[axes_[i] - 1] = end - start;
}
return out_dims;
}
int SlicePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, hipStream_t stream) {
#else
void *const *outputs, void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#endif
auto input_dims = getInputDims(0);
// notice input dims is [C, H, W], add input batch dim here
auto out_dims = getOutputDimensions(0, &input_dims, 1);
input_dims.nbDims += 1;
out_dims.nbDims += 1;
for (auto i = input_dims.nbDims; i > 0; --i) {
input_dims.d[i] = input_dims.d[i - 1];
out_dims.d[i] = out_dims.d[i - 1];
}
input_dims.d[0] = batch_size;
out_dims.d[0] = batch_size;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
hipMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice,
copy_stream_);
hipEventRecord(copy_event_, copy_stream_);
hipStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(getPluginType()) +
SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_);
}
void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, getPluginType());
serializeBase(buffer);
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts,
std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePluginDynamic::SlicePluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &starts_);
DeserializeValue(&serialData, &serialLength, &ends_);
DeserializeValue(&serialData, &serialLength, &axes_);
DeserializeValue(&serialData, &serialLength, &with_fp16_);
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
void SlicePluginDynamic::destroy() TRT_NOEXCEPT {
hipStreamDestroy(copy_stream_);
hipEventDestroy(copy_event_);
hipFree(offset_temp_data_);
delete this;
}
int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t size = SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_) + SerializedSize(with_fp16_);
return size;
}
void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
// starts and ends should be greater than 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
#if IS_TRT_VERSION_GE(7200)
ret.d[axes_[i]] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUB,
*expr_builder.operation(nvinfer1::DimensionOperation::kMIN,
*expr_builder.constant(ends_[i]),
*in_dims.d[axes_[i]]),
*expr_builder.constant(start));
#else
ret.d[axes_[i]] = expr_builder.constant(end - start);
#endif
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
hipMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice,
copy_stream_);
hipEventRecord(copy_event_, copy_stream_);
hipStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 0021ee4b1755a3e7a4a92f7ce576e195b11f7dba.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) {
shared_data[i] = offsets_info[i];
}
__syncthreads();
if (idx < num) {
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
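// Illustrative layout of offsets_info consumed above (hypothetical shapes):
// for an input of shape [4, 5, 6] sliced to an output of shape [4, 3, 6]
// with starts = {1} on axes = {1}, the host side passes
//   offsets_info = {0, 4, 30,   1, 3, 6,   0, 6, 1}
// i.e. one (start offset, output extent, input segment stride) triple per
// dimension. The kernel peels the output index from the innermost dimension
// outwards: t = t_idx % extent, then in_idx += stride * (t + start).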
SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) {
deserializeBase(serial_data, serial_length);
DeserializeValue(&serial_data, &serial_length, &starts_);
DeserializeValue(&serial_data, &serial_length, &ends_);
DeserializeValue(&serial_data, &serial_length, &axes_);
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePlugin::~SlicePlugin() {
cudaStreamDestroy(copy_stream_);
cudaEventDestroy(copy_event_);
cudaFree(offset_temp_data_);
}
SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT {
return new SlicePlugin(starts_, ends_, axes_, with_fp16_);
}
bool SlicePlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims SlicePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT {
auto in_dims = inputs[0];
nvinfer1::Dims out_dims = in_dims;
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
out_dims.d[axes_[i] - 1] = end - start;
}
return out_dims;
}
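// Illustrative example (hypothetical values): for implicit-batch input dims
// [C, H, W] = [3, 224, 224] with axes_ = {2}, starts_ = {10}, ends_ = {110},
// axes_ appears to count the batch dimension, so axes_[i] - 1 selects H and
// the reported output dims become [3, 100, 224].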
int SlicePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, cudaStream_t stream) {
#else
void *const *outputs, void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
auto input_dims = getInputDims(0);
  // note: input dims are [C, H, W]; add the input batch dim here
auto out_dims = getOutputDimensions(0, &input_dims, 1);
input_dims.nbDims += 1;
out_dims.nbDims += 1;
for (auto i = input_dims.nbDims; i > 0; --i) {
input_dims.d[i] = input_dims.d[i - 1];
out_dims.d[i] = out_dims.d[i - 1];
}
input_dims.d[0] = batch_size;
out_dims.d[0] = batch_size;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
cudaMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice,
copy_stream_);
cudaEventRecord(copy_event_, copy_stream_);
cudaStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(getPluginType()) +
SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_);
}
void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, getPluginType());
serializeBase(buffer);
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts,
std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePluginDynamic::SlicePluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &starts_);
DeserializeValue(&serialData, &serialLength, &ends_);
DeserializeValue(&serialData, &serialLength, &axes_);
DeserializeValue(&serialData, &serialLength, &with_fp16_);
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
void SlicePluginDynamic::destroy() TRT_NOEXCEPT {
cudaStreamDestroy(copy_stream_);
cudaEventDestroy(copy_event_);
cudaFree(offset_temp_data_);
delete this;
}
int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t size = SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_) + SerializedSize(with_fp16_);
return size;
}
void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
  // starts and ends should be greater than 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
#if IS_TRT_VERSION_GE(7200)
ret.d[axes_[i]] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUB,
*expr_builder.operation(nvinfer1::DimensionOperation::kMIN,
*expr_builder.constant(ends_[i]),
*in_dims.d[axes_[i]]),
*expr_builder.constant(start));
#else
ret.d[axes_[i]] = expr_builder.constant(end - start);
#endif
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
cudaMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice,
copy_stream_);
cudaEventRecord(copy_event_, copy_stream_);
cudaStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
89927090bf9f99050864a06fabfcbe102e7d4374.hip | // !!! This is a file automatically generated by hipify!!!
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#include <stdio.h>
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific command line argument parsing.
//
//    -pinned
// This option controls whether page-locked or "pinned" memory is used.
// The use of pinned memory typically results in higher bandwidth for data
// transfer between host and device.
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: September 08, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("pinned", OPT_BOOL, "0", "use pinned (pagelocked) memory");
}
// ****************************************************************************
// Function: runBenchmark
//
// Purpose:
// Measures the bandwidth of the bus connecting the host processor to the
//    CUDA device. This benchmark repeatedly transfers data chunks of various
// sizes across the bus to the host from the device and calculates the
// bandwidth for each chunk size.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: September 08, 2009
//
// Modifications:
// Jeremy Meredith, Wed Dec 1 17:05:27 EST 2010
// Added calculation of latency estimate. //
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
cout << "Running BusSpeedReadback" << endl;
const bool verbose = op.getOptionBool("verbose");
const bool quiet = op.getOptionBool("quiet");
const bool pinned = op.getOptionBool("pinned");
// Sizes are in kb
int nSizes = 20;
int sizes[20] = {1, 2, 4, 8, 16, 32, 64,
128, 256, 512, 1024, 2048, 4096, 8192,
16384, 32768, 65536, 131072, 262144, 524288};
long long numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
// Create some host memory pattern
float *hostMem1;
float *hostMem2;
if (pinned) {
hipHostMalloc((void **)&hostMem1, sizeof(float) * numMaxFloats);
hipError_t err1 = hipGetLastError();
hipHostMalloc((void **)&hostMem2, sizeof(float) * numMaxFloats);
hipError_t err2 = hipGetLastError();
while (err1 != hipSuccess || err2 != hipSuccess) {
// free the first buffer if only the second failed
if (err1 == hipSuccess)
hipHostFree((void *)hostMem1);
// drop the size and try again
if (verbose && !quiet) {
cout << " - dropping size allocating pinned mem\n";
}
--nSizes;
if (nSizes < 1) {
cerr << "Error: Couldn't allocated any pinned buffer\n";
return;
}
numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
hipHostMalloc((void **)&hostMem1, sizeof(float) * numMaxFloats);
err1 = hipGetLastError();
hipHostMalloc((void **)&hostMem2, sizeof(float) * numMaxFloats);
err2 = hipGetLastError();
}
} else {
hostMem1 = new float[numMaxFloats];
hostMem2 = new float[numMaxFloats];
}
for (int i = 0; i < numMaxFloats; i++)
hostMem1[i] = i % 77;
float *device;
hipMalloc((void **)&device, sizeof(float) * numMaxFloats);
while (hipGetLastError() != hipSuccess) {
// drop the size and try again
if (verbose && !quiet) {
cout << " - dropping size allocating device mem\n";
}
--nSizes;
if (nSizes < 1) {
cerr << "Error: Couldn't allocated any device buffer\n";
return;
}
numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
hipMalloc((void **)&device, sizeof(float) * numMaxFloats);
}
CUDA_SAFE_CALL(hipMemcpy(device, hostMem1, numMaxFloats * sizeof(float),
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
const unsigned int passes = op.getOptionInt("passes");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
  // Run the requested number of passes, stepping through the sizes in alternating directions
for (int pass = 0; pass < passes; pass++) {
// store the times temporarily to estimate latency
// float times[nSizes];
// Step through sizes forward on even passes and backward on odd
for (int i = 0; i < nSizes; i++) {
int sizeIndex;
if ((pass % 2) == 0)
sizeIndex = i;
else
sizeIndex = (nSizes - 1) - i;
int nbytes = sizes[sizeIndex] * 1024;
hipEventRecord(start, 0);
hipMemcpy(hostMem2, device, nbytes, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float t = 0;
hipEventElapsedTime(&t, start, stop);
// times[sizeIndex] = t;
// Convert to GB/sec
if (verbose && !quiet) {
cout << "size " << sizes[sizeIndex] << "k took " << t << " ms\n";
}
double speed = (double(sizes[sizeIndex]) * 1024. / (1000 * 1000)) / t;
resultDB.AddResult("ReadbackSpeed", "---", "GB/sec", speed);
resultDB.AddOverall("ReadbackSpeed", "GB/sec", speed);
}
}
// Cleanup
CUDA_SAFE_CALL(hipFree((void *)device));
  if (pinned) {
    CUDA_SAFE_CALL(hipHostFree((void *)hostMem1));
    CUDA_SAFE_CALL(hipHostFree((void *)hostMem2));
  } else {
    delete[] hostMem1;
    delete[] hostMem2;
  }
  // the timing events were created unconditionally, so destroy them on both paths
  CUDA_SAFE_CALL(hipEventDestroy(start));
  CUDA_SAFE_CALL(hipEventDestroy(stop));
}
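// Sanity check for the bandwidth formula above: the result is
// (kB * 1024 / 1e6) megabytes divided by the elapsed time in ms, and MB/ms is
// numerically equal to GB/s. For example, a 524288 kB (512 MB) readback that
// takes 45 ms would be reported as (524288 * 1024 / 1e6) / 45 ~= 11.9 GB/s
// (illustrative numbers only).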
| 89927090bf9f99050864a06fabfcbe102e7d4374.cu | #include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#include <stdio.h>
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific command line argument parsing.
//
//    -pinned
// This option controls whether page-locked or "pinned" memory is used.
// The use of pinned memory typically results in higher bandwidth for data
// transfer between host and device.
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: September 08, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("pinned", OPT_BOOL, "0", "use pinned (pagelocked) memory");
}
// ****************************************************************************
// Function: runBenchmark
//
// Purpose:
// Measures the bandwidth of the bus connecting the host processor to the
//    CUDA device. This benchmark repeatedly transfers data chunks of various
// sizes across the bus to the host from the device and calculates the
// bandwidth for each chunk size.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: September 08, 2009
//
// Modifications:
// Jeremy Meredith, Wed Dec 1 17:05:27 EST 2010
// Added calculation of latency estimate. //
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
cout << "Running BusSpeedReadback" << endl;
const bool verbose = op.getOptionBool("verbose");
const bool quiet = op.getOptionBool("quiet");
const bool pinned = op.getOptionBool("pinned");
// Sizes are in kb
int nSizes = 20;
int sizes[20] = {1, 2, 4, 8, 16, 32, 64,
128, 256, 512, 1024, 2048, 4096, 8192,
16384, 32768, 65536, 131072, 262144, 524288};
long long numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
// Create some host memory pattern
float *hostMem1;
float *hostMem2;
if (pinned) {
cudaMallocHost((void **)&hostMem1, sizeof(float) * numMaxFloats);
cudaError_t err1 = cudaGetLastError();
cudaMallocHost((void **)&hostMem2, sizeof(float) * numMaxFloats);
cudaError_t err2 = cudaGetLastError();
while (err1 != cudaSuccess || err2 != cudaSuccess) {
// free the first buffer if only the second failed
if (err1 == cudaSuccess)
cudaFreeHost((void *)hostMem1);
// drop the size and try again
if (verbose && !quiet) {
cout << " - dropping size allocating pinned mem\n";
}
--nSizes;
if (nSizes < 1) {
cerr << "Error: Couldn't allocated any pinned buffer\n";
return;
}
numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
cudaMallocHost((void **)&hostMem1, sizeof(float) * numMaxFloats);
err1 = cudaGetLastError();
cudaMallocHost((void **)&hostMem2, sizeof(float) * numMaxFloats);
err2 = cudaGetLastError();
}
} else {
hostMem1 = new float[numMaxFloats];
hostMem2 = new float[numMaxFloats];
}
for (int i = 0; i < numMaxFloats; i++)
hostMem1[i] = i % 77;
float *device;
cudaMalloc((void **)&device, sizeof(float) * numMaxFloats);
while (cudaGetLastError() != cudaSuccess) {
// drop the size and try again
if (verbose && !quiet) {
cout << " - dropping size allocating device mem\n";
}
--nSizes;
if (nSizes < 1) {
cerr << "Error: Couldn't allocated any device buffer\n";
return;
}
numMaxFloats = 1024 * (sizes[nSizes - 1]) / 4;
cudaMalloc((void **)&device, sizeof(float) * numMaxFloats);
}
CUDA_SAFE_CALL(cudaMemcpy(device, hostMem1, numMaxFloats * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaThreadSynchronize());
const unsigned int passes = op.getOptionInt("passes");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
  // Run the requested number of passes, stepping through the sizes in alternating directions
for (int pass = 0; pass < passes; pass++) {
// store the times temporarily to estimate latency
// float times[nSizes];
// Step through sizes forward on even passes and backward on odd
for (int i = 0; i < nSizes; i++) {
int sizeIndex;
if ((pass % 2) == 0)
sizeIndex = i;
else
sizeIndex = (nSizes - 1) - i;
int nbytes = sizes[sizeIndex] * 1024;
cudaEventRecord(start, 0);
cudaMemcpy(hostMem2, device, nbytes, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float t = 0;
cudaEventElapsedTime(&t, start, stop);
// times[sizeIndex] = t;
// Convert to GB/sec
if (verbose && !quiet) {
cout << "size " << sizes[sizeIndex] << "k took " << t << " ms\n";
}
double speed = (double(sizes[sizeIndex]) * 1024. / (1000 * 1000)) / t;
resultDB.AddResult("ReadbackSpeed", "---", "GB/sec", speed);
resultDB.AddOverall("ReadbackSpeed", "GB/sec", speed);
}
}
// Cleanup
CUDA_SAFE_CALL(cudaFree((void *)device));
  if (pinned) {
    CUDA_SAFE_CALL(cudaFreeHost((void *)hostMem1));
    CUDA_SAFE_CALL(cudaFreeHost((void *)hostMem2));
  } else {
    delete[] hostMem1;
    delete[] hostMem2;
  }
  // the timing events were created unconditionally, so destroy them on both paths
  CUDA_SAFE_CALL(cudaEventDestroy(start));
  CUDA_SAFE_CALL(cudaEventDestroy(stop));
}
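// Note on the latency estimate mentioned in the Modifications block: the
// times[] bookkeeping it relied on is commented out above. A minimal sketch,
// assuming times[] held the per-size transfer times in ms in the same order
// as sizes[], would treat the smallest (1 kB) transfer as overhead-dominated:
//   double latency_ms = times[0];  // ~ fixed per-transfer overhead
// The benchmark as written does not store these values, so this is only an
// illustration of how such an estimate could be recovered.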
|
fdd032dd69de0c4b4f651f5cebd5b72d6cb14649.hip | // !!! This is a file automatically generated by hipify!!!
#include "file_system.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define DIR 0
#define FILE 1
using namespace std;
__device__ __managed__ u32 gtime = 1;
__device__ __managed__ u32 gtime_create = 1;
__device__ __managed__ u32 block_position = 0;
__device__ __managed__ u32 FCB_position = 4096;
__device__ __managed__ u32 current_FCB_position = 4096;
__device__ void display(FileSystem* fs) {
printf("this fs////////////////////////////////////////////");
printf("in the zero position\n");
printf("name = %s\n", fs->directory[0].name);
printf("sibling = %d\n", fs->directory[0].sibling);
printf("child = %d\n", fs->directory[0].child);
printf("parent = %d\n", fs->directory[0].parent);
printf("size = %d\n", fs->directory[0].size);
printf("indentity = %d\n", fs->directory[0].identity);
printf("create time = %d\n", fs->directory[0].create_date);
printf("modified time = %d\n", fs->directory[0].modified_date);
printf("\n");
printf("in the one position\n");
printf("name = %s\n", fs->directory[1].name);
printf("sibling = %d\n", fs->directory[1].sibling);
printf("parent = %d\n", fs->directory[1].parent);
printf("child = %d\n", fs->directory[1].child);
printf("size = %d\n", fs->directory[1].size);
printf("indentity = %d\n", fs->directory[1].identity);
printf("create time = %d\n", fs->directory[1].create_date);
printf("modified time = %d\n", fs->directory[1].modified_date);
printf("\n");
printf("in the second position\n");
printf("name = %s\n", fs->directory[2].name);
printf("sibling = %d\n", fs->directory[2].sibling);
printf("parent = %d\n", fs->directory[2].parent);
printf("child = %d\n", fs->directory[2].child);
printf("size = %d\n", fs->directory[2].size);
printf("indentity = %d\n", fs->directory[2].identity);
printf("create time = %d\n", fs->directory[2].create_date);
printf("modified time = %d\n", fs->directory[2].modified_date);
printf("\n");
printf("in the third position\n");
printf("name = %s\n", fs->directory[3].name);
printf("sibling = %d\n", fs->directory[3].sibling);
printf("parent = %d\n", fs->directory[3].parent);
printf("child = %d\n", fs->directory[3].child);
printf("size = %d\n", fs->directory[3].size);
printf("indentity = %d\n", fs->directory[3].identity);
printf("create time = %d\n", fs->directory[3].create_date);
printf("modified time = %d\n", fs->directory[3].modified_date);
}
__device__ void display_valid(FileSystem* fs) {
printf("this is valid fs//////////////////////////////////");
printf("in the zero position\n");
printf("name = %s\n", fs->valid_directory[0].name);
printf("sibling = %d\n", fs->valid_directory[0].sibling);
printf("child = %d\n", fs->valid_directory[0].child);
printf("parent = %d\n", fs->valid_directory[0].parent);
printf("size = %d\n", fs->valid_directory[0].size);
printf("indentity = %d\n", fs->valid_directory[0].identity);
printf("create time = %d\n", fs->valid_directory[0].create_date);
printf("modified time = %d\n", fs->valid_directory[0].modified_date);
printf("\n");
printf("in the one position\n");
printf("name = %s\n", fs->valid_directory[1].name);
printf("sibling = %d\n", fs->valid_directory[1].sibling);
printf("parent = %d\n", fs->valid_directory[1].parent);
printf("child = %d\n", fs->valid_directory[1].child);
printf("size = %d\n", fs->valid_directory[1].size);
printf("indentity = %d\n", fs->valid_directory[1].identity);
printf("create time = %d\n", fs->valid_directory[1].create_date);
printf("modified time = %d\n", fs->valid_directory[1].modified_date);
printf("\n");
printf("in the second position\n");
printf("name = %s\n", fs->valid_directory[2].name);
printf("sibling = %d\n", fs->valid_directory[2].sibling);
printf("parent = %d\n", fs->valid_directory[2].parent);
printf("child = %d\n", fs->valid_directory[2].child);
printf("size = %d\n", fs->valid_directory[2].size);
printf("indentity = %d\n", fs->valid_directory[2].identity);
printf("create time = %d\n", fs->valid_directory[2].create_date);
printf("modified time = %d\n", fs->valid_directory[2].modified_date);
printf("\n");
printf("in the third position\n");
printf("name = %s\n", fs->valid_directory[3].name);
printf("sibling = %d\n", fs->valid_directory[3].sibling);
printf("parent = %d\n", fs->valid_directory[3].parent);
printf("child = %d\n", fs->valid_directory[3].child);
printf("size = %d\n", fs->valid_directory[3].size);
printf("indentity = %d\n", fs->valid_directory[3].identity);
printf("create time = %d\n", fs->valid_directory[3].create_date);
printf("modified time = %d\n", fs->valid_directory[3].modified_date);
}
__device__ void fs_init(FileSystem *fs, uchar *volume, int SUPERBLOCK_SIZE,
int FCB_SIZE, int FCB_ENTRIES, int VOLUME_SIZE,
int STORAGE_BLOCK_SIZE, int MAX_FILENAME_SIZE,
int MAX_FILE_NUM, int MAX_FILE_SIZE, int FILE_BASE_ADDRESS)
{
// init variables
fs->volume = volume;
// init constants
fs->SUPERBLOCK_SIZE = SUPERBLOCK_SIZE;
fs->FCB_SIZE = FCB_SIZE;
fs->FCB_ENTRIES = FCB_ENTRIES;
fs->STORAGE_SIZE = VOLUME_SIZE;
fs->STORAGE_BLOCK_SIZE = STORAGE_BLOCK_SIZE;
fs->MAX_FILENAME_SIZE = MAX_FILENAME_SIZE;
fs->MAX_FILE_NUM = MAX_FILE_NUM;
fs->MAX_FILE_SIZE = MAX_FILE_SIZE;
fs->FILE_BASE_ADDRESS = FILE_BASE_ADDRESS;
}
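// On-volume layout assumed by the routines below for each 32-byte FCB entry
// in fs->volume[4096 .. 36863] (reconstructed from how fs_open/fs_write
// index into it):
//   bytes [0..3]   file size (big-endian u32)
//   bytes [4..23]  file name (up to 20 characters)
//   bytes [24..25] create time (gtime_create, 16 bits)
//   bytes [26..27] modified time (gtime, 16 bits)
//   bytes [28..31] starting block index in the data region (big-endian u32)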
__device__ __managed__ int current_index = 0;
__device__ __managed__ int last_index = 1;
__device__ __managed__ int current_depth = 0;
__device__ __managed__ int write_index = 0;
__device__ void segment_management_FCB(FileSystem *fs, u32 fp) {
for (int i = fp; i < 36863; i = i + 32) {
if (fs->volume[i + 32] == 0 && fs->volume[i + 32 + 1] == 0 && fs->volume[i + 32 + 2] == 0 && fs->volume[i + 32 + 3] == 0) break;
for (int j = 0; j < 32; j++) {
fs->volume[i + j] = fs->volume[i + j + 32];
fs->volume[i + j + 32] = 0;
}
}
}
__device__ void segment_management(FileSystem *fs, u32 fp, u32 original_size) {
//manage the volume
u32 position = fs->FILE_BASE_ADDRESS + fp * 32;
u32 size = ((original_size - 1) / 32 + 1) * 32;
while ((fs->volume[position + size] != 0 || (position + size) %32 != 0)&& position + original_size < fs->STORAGE_SIZE) {
fs->volume[position] = fs->volume[position + size];
fs->volume[position + size] = 0;
position++;
}
//manage the block
for (int i = 0; i < block_position / 8 + 1; i++) {
fs->volume[i] = 0;
}
block_position = block_position - (original_size - 1) / 32 - 1;
u32 whole_block = block_position / 8;
u32 remainder = block_position % 8;
for (int i = 0; i < whole_block && i < fs->SUPERBLOCK_SIZE ; i++) {
fs->volume[i] = 511;
}
for (int i = 0; i < remainder; i++) {
fs->volume[whole_block] = fs->volume[whole_block] + (1 << i);
}
//change FCB
u32 FCB_block_position;
for (int i = 4096; i < 36863; i = i + 32) {
if (fs->volume[i] == 0 && fs->volume[i + 1] == 0 && fs->volume[i + 2] == 0 && fs->volume[i + 3] == 0) break;
FCB_block_position = (fs->volume[i + 28] << 24) + (fs->volume[i + 29] << 16) + (fs->volume[i + 30] << 8) + (fs->volume[i + 31]);
if (FCB_block_position > fp) {
FCB_block_position = FCB_block_position - (original_size - 1) / 32 - 1;
fs->volume[i + 28] = FCB_block_position >> 24;
fs->volume[i + 29] = FCB_block_position >> 16;
fs->volume[i + 30] = FCB_block_position >> 8;
fs->volume[i + 31] = FCB_block_position;
}
}
}
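// Illustrative example of the compaction above (hypothetical values): if a
// file that started at block fp = 10 and occupied 3 blocks (original_size in
// the range 65..96 bytes) is freed, every byte stored after the hole is
// shifted down by 3 * 32 = 96 bytes, block_position shrinks by 3, the
// superblock bitmap is rebuilt for the new block_position, and every FCB
// whose start block was greater than 10 has its start block reduced by 3.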
__device__ void display(FileSystem*fs, u32 stop_position, int op) {
//display date
if (op == 0) {
printf("stop position = %d\n", stop_position);
printf("===sort by modified time===\n");
for (u32 i = 0; i <= stop_position; i++) {
if (fs->valid_directory[i].identity == 0) printf("%s d\n", fs->valid_directory[i].name);
else printf("%s\n",fs->valid_directory[i].name);
}
}
else {
printf("stop position = %d\n", stop_position);
printf("===sort by file size===\n");
for (u32 i = 0; i <= stop_position; i++) {
if (fs->valid_directory[i].identity == 0) printf("%s %d d\n", fs->valid_directory[i].name, fs->valid_directory[i].size);
else printf("%s %d\n", fs->valid_directory[i].name, fs->valid_directory[i].size);
}
}
}
__device__ void swap(FileSystem* fs, int x, int y) {
struct file_directory tempt = fs->valid_directory[x];
fs->valid_directory[x] = fs->valid_directory[y];
fs->valid_directory[y] = tempt;
}
__device__ void bubblesort(FileSystem *fs, u32 left, u32 right, int op) {
// sort by date
if (op == 0) {
for (int i = left; i < right; i ++) {
for (int j = left; j < right - i + left; j++) {
int j_date_previous = fs->valid_directory[j].modified_date;
int j_date_after = fs->valid_directory[j+1].modified_date;
if (j_date_previous < j_date_after) swap(fs, j, j + 1);
}
}
}
else {
for (int i = left; i < right; i++) {
for (int j = left; j < right - i + left; j++) {
int j_size_previous = fs->valid_directory[j].size;
int j_size_after = fs->valid_directory[j + 1].size;
int j_date_previous = fs->valid_directory[j].create_date;
int j_date_after = fs->valid_directory[j + 1].create_date;
if (j_size_previous < j_size_after) swap(fs, j, j + 1);
if (j_size_after == j_size_previous && j_date_previous > j_date_after) swap(fs, j, j + 1);
}
}
}
}
__device__ u32 if_exist(FileSystem *fs, char *s) {
//return FCB position
int flag;
for (int i = 4096; i < 36863; i = i + 32) {
flag = 0;
if (fs->volume[i] == 0 && fs->volume[i + 1] == 0 && fs->volume[i + 2] == 0 && fs->volume[i + 3] == 0) {
break;
}
for (int j = 4; j < 24; j++) {
if (fs->volume[i + j] != s[j - 4]) {
flag = 1;
break;
}
}
if (flag == 1) continue;
if (flag == 0) return i;
}
return -1;
}
__device__ bool check_the_name(char*name1, char*name2) {
for (int i = 0; i < 20; i++) {
if (name1[i] != name2[i]) return true;
}
return false;
}
__device__ int if_exist_directory(FileSystem *fs, char *s) {
if (fs->directory[current_index].child == NULL) return -1;
else {
int directory_index = fs->directory[current_index].child;
while (check_the_name(fs->directory[directory_index].name, s) && fs->directory[directory_index].sibling != NULL) directory_index = fs->directory[directory_index].sibling;
if (check_the_name(fs->directory[directory_index].name, s)) return -directory_index;
return directory_index;
}
}
__device__ u32 fs_open(FileSystem *fs, char *s, int op)
{
printf("//////////////////////////////////////////before open\n");
display(fs);
/* Implement open operation here */
//if not exist
int check = if_exist_directory(fs, s);
//printf("check in open is %d\n", check);
if (check < 0) {
printf("file do not exist\n");
if (op == 0) {
printf("can not find the file to read error\n");
return -1;
}
//store in the directory
int name_count = 0;
fs->directory[last_index].child = NULL;
fs->directory[last_index].identity = 1;
for (int i = 0; i < 20 && (i == 0 || s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].parent = current_index;
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].size = 0;
fs->directory[last_index].create_date = gtime_create;
fs->directory[last_index].modified_date = gtime;
if (fs->directory[current_index].child == NULL) fs->directory[current_index].child = last_index;
else fs->directory[-check].sibling = last_index;
fs->directory[current_index].size += name_count;
write_index = last_index;
last_index++;
//implement the volume
//store the name
current_FCB_position = FCB_position;
//printf("for name in open = ");
for (int i = 4; i < 24; i++) {
fs->volume[FCB_position + i] = s[i - 4];
}
//store the create date
fs->volume[FCB_position + 24] = gtime_create >> 8;
fs->volume[FCB_position + 25] = gtime_create;
		//store the modified date
fs->volume[FCB_position + 26] = gtime >> 8;
fs->volume[FCB_position + 27] = gtime;
//store the start block
fs->volume[FCB_position + 28] = block_position >> 24;
fs->volume[FCB_position + 29] = block_position >> 16;
fs->volume[FCB_position + 30] = block_position >> 8;
fs->volume[FCB_position + 31] = block_position;
//update the date
gtime++;
gtime_create++;
//update FCB position
FCB_position = FCB_position + 32;
return block_position;
}
//if exist
else {
current_FCB_position = if_exist(fs, s);
u32 start_block = (fs->volume[current_FCB_position + 28] << 24) + (fs->volume[current_FCB_position + 29] << 16) + (fs->volume[current_FCB_position + 30] << 8) + (fs->volume[current_FCB_position + 31]);
//if write
if (op == 1) {
write_index = check;
fs->directory[write_index].modified_date = gtime;
//clean the old file in volume
u32 size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
for (int i = 0; i < size; i++) {
fs->volume[start_block * 32 + i + fs->FILE_BASE_ADDRESS] = 0;
}
//clean the old file in block
for (int i = 0; i < (size - 1) / 32 + 1; i++) {
u32 super_block_position = start_block + i;
int shift_number = super_block_position % 8;
fs->volume[super_block_position / 8] = fs->volume[super_block_position / 8] - (1 << shift_number);
}
//update FCB date
fs->volume[current_FCB_position + 26] = gtime >> 8;
fs->volume[current_FCB_position + 27] = gtime;
//update the date
gtime++;
}
return start_block;
}
}
__device__ void fs_read(FileSystem *fs, uchar *output, u32 size, u32 fp)
{
printf("///////////////////////////////////before read\n");
display(fs);
/* Implement read operation here */
for (int i = 0; i < size; i++) {
output[i] = fs->volume[fp * 32 + i + fs->FILE_BASE_ADDRESS];
}
}
__device__ u32 fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp)
{
printf("///////////////////////////////////////////before write\n");
display(fs);
/* Implement write operation here */
//enough space
fs->directory[write_index].size = size;
if ((fs->volume[(fp + (size - 1) / 32)/8] >> (fp + (size - 1) / 32) % 8) % 2 == 0) {
u32 old_file_size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
u32 original_size = old_file_size - size;
//update volume
for (int i = 0; i < size; i++) {
fs->volume[fp * 32 + i + fs->FILE_BASE_ADDRESS] = input[i];
//update block
if (i % 32 == 0) {
u32 super_block_position = fp + i / 32;
int shift_number = super_block_position % 8;
fs->volume[(fp + i /32) / 8] = fs->volume[(fp + i / 32) / 8] + (1 << shift_number);
}
}
if (int (original_size) < 0) block_position = block_position + (-original_size - 1) / 32 + 1;
//update size
fs->volume[current_FCB_position] = size >> 24;
fs->volume[current_FCB_position + 1] = size >> 16;
fs->volume[current_FCB_position + 2] = size >> 8;
fs->volume[current_FCB_position + 3] = size;
if (original_size > 0 && old_file_size != 0 && fp != block_position - 1) segment_management(fs, fp + (size - 1) / 32 + 1, original_size);
}
//out of space
else {
u32 original_size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
if (block_position * 32 - 1 + size >= fs->SUPERBLOCK_SIZE) {
return -1;
}
//update volume
else {
for (int i = 0; i < size; i++) {
fs->volume[block_position * 32 + i + fs->FILE_BASE_ADDRESS] = input[i];
//update block
if (i % 32 == 0) {
u32 super_block_position = block_position + i / 32;
int shift_number = super_block_position % 8;
fs->volume[(block_position + i / 32) / 8] = fs->volume[(block_position + i / 32) / 8] + (1 << shift_number);
}
}
//update size
fs->volume[current_FCB_position] = size >> 24;
fs->volume[current_FCB_position + 1] = size >> 16;
fs->volume[current_FCB_position + 2] = size >> 8;
fs->volume[current_FCB_position + 3] = size;
//update block position
			fs->volume[current_FCB_position + 28] = block_position >> 24;
fs->volume[current_FCB_position + 29] = block_position >> 16;
fs->volume[current_FCB_position + 30] = block_position >> 8;
fs->volume[current_FCB_position + 31] = block_position;
}
segment_management(fs, fp, original_size);
}
}
__device__ int morrisTraversal(FileSystem* fs, int root_index) {
int count = 0;
int parent_index = root_index;
while (fs->directory[parent_index].sibling != NULL) {
if (fs->directory[parent_index].child != NULL) {
int child_index = fs->directory[parent_index].child;
while (fs->directory[child_index].sibling != NULL) {
if (fs->directory[child_index].child != NULL) {
int grand_child_index = fs->directory[child_index].child;
while (fs->directory[grand_child_index].sibling != NULL) {
fs->valid_directory[count] = fs->directory[grand_child_index];
printf("count = %d, %d\n", count, grand_child_index);
count++;
grand_child_index = fs->directory[grand_child_index].sibling;
}
fs->valid_directory[count] = fs->directory[grand_child_index];
printf("count = %d, %d\n", count, child_index);
count++;
}
fs->valid_directory[count] = fs->directory[child_index];
printf("count = %d, %d\n", count, child_index);
count++;
child_index = fs->directory[child_index].sibling;
}
fs->valid_directory[count] = fs->directory[child_index];
printf("count = %d, %d\n", count, child_index);
count++;
}
fs->valid_directory[count] = fs->directory[parent_index];
printf("count = %d, %d\n", count, parent_index);
count++;
parent_index = fs->directory[parent_index].sibling;
}
fs->valid_directory[count] = fs->directory[parent_index];
printf("count = %d, %d\n", count, parent_index);
count++;
return count;
}
__device__ void fs_gsys(FileSystem *fs, int op)
{
if (op == 0 || op == 1) {
int count = morrisTraversal(fs, fs->directory[0].child);
printf("/////////////////////after morrisTraversal\n");
display(fs);
display_valid(fs);
printf("///////////////////////////count is %d\n", count);
bubblesort(fs, 0, count - 1, op);
printf("///////////////////////////after bubble sort\n");
display(fs);
display_valid(fs);
display(fs, count - 1, op);
}
// CD_P
else if (op == 6) {
//no parent
		if (fs->directory[current_index].parent == NULL) printf("no parent error\n");
		else {
			current_index = fs->directory[current_index].parent;
			current_depth--;
		}
}
//PWD
else if (op == 5) {
int index_directory = current_index;
if (current_depth == 1) printf("/%s\n", fs->directory[current_index].name);
else if (current_depth == 2) printf("/%s/%s\n", fs->directory[fs->directory[current_index].parent].name, fs->directory[current_index].name);
else {
int parent_index = fs->directory[current_index].parent;
char* parent = fs->directory[parent_index].name;
int pp_index = fs->directory[parent_index].parent;
char* parent_parent = fs->directory[pp_index].name;
printf("/%s/%s/%s\n", parent_parent, parent, fs->directory[current_index].name);
}
}
}
__device__ void fs_gsys(FileSystem *fs, int op, char *s)
{
if (op == 2) {
int index_previous = -1;
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
		while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) {
			index_previous = index_directory;
			index_directory = fs->directory[index_directory].sibling;
		}
		if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
//clean the file inside
// it is file
if (fs->directory[index_directory].identity == 1) {
//rm operation for directory tree
/* Implement rm operation here */
if (if_exist(fs, s) == -1) printf("no such file founded error\n");
else {
current_FCB_position = if_exist(fs, s);
//change volume
u32 start_block = (fs->volume[current_FCB_position + 28] << 24) + (fs->volume[current_FCB_position + 29] << 16) + (fs->volume[current_FCB_position + 30] << 8) + (fs->volume[current_FCB_position + 31]);
//clean the old file in volume
u32 size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
for (int i = 0; i < size; i++) {
fs->volume[start_block * 32 + i + fs->FILE_BASE_ADDRESS] = 0;
}
//clean the old file in block
for (int i = 0; i < (size - 1) / 32 + 1; i++) {
fs->volume[start_block + i] = 0;
}
//clean the FCB
for (int i = 0; i < 32; i++) {
fs->volume[current_FCB_position + i] = 0;
}
segment_management(fs, start_block, size);
segment_management_FCB(fs, current_FCB_position);
FCB_position = FCB_position - 32;
}
			// unlink the removed entry but keep the rest of the sibling list intact
			if (index_previous != -1) fs->directory[index_previous].sibling = fs->directory[index_directory].sibling;
			else fs->directory[current_index].child = fs->directory[index_directory].sibling;
}
else printf("can not use RM to remove the directory\n");
}
else if (op == 3) {
//MKDIR
//for debug
printf("///////////////////////////////////in MKDIR\n");
display(fs);
if (last_index > 1024) printf("file out of storage error\n");
//for debug
printf("last_index is %d\n", last_index);
if (fs->directory[current_index].identity == 1) printf("can not MKDIR in file error\n");
if (current_depth >= 3) printf("file out of depth\n");
int index_directory = fs->directory[current_index].child;
printf("index_directory = %d\n", index_directory);
//no other file
if (index_directory == NULL) {
printf("no other file in MKDIR\n");
int name_count = 0;
			for (int i = 0; i < 20 && (i == 0 || s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].child = NULL;
fs->directory[last_index].parent = current_index;
fs->directory[last_index].identity = 0;
fs->directory[last_index].size = 0;
fs->directory[last_index].modified_date = gtime;
fs->directory[last_index].create_date = gtime_create;
fs->directory[current_index].child = last_index;
fs->directory[current_index].size += name_count;
}
//other files
else {
printf("other file\n");
int file_count = 0;
int name_count = 0;
while (fs->directory[index_directory].sibling != NULL) {
index_directory = fs->directory[index_directory].sibling;
file_count++;
}
printf("index_directory = %d\n", index_directory);
if (file_count >= 50) printf("file out of directory storage\n");
for (int i = 0; i < 20 && (i == 0||s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].child = NULL;
fs->directory[last_index].parent = current_index;
fs->directory[index_directory].sibling = last_index;
fs->directory[last_index].identity = 0;
fs->directory[last_index].size = 0;
fs->directory[current_index].size += name_count;
fs->directory[last_index].modified_date = gtime;
fs->directory[last_index].create_date = gtime_create;
}
last_index++;
printf("last index = %d\n", last_index);
printf("\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\after MKDIR\n");
display(fs);
}
else if (op == 4) {
//CD
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
		while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) index_directory = fs->directory[index_directory].sibling;
		if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
		else if (fs->directory[index_directory].identity == 1) printf("cannot move into a file\n");
		else {
			current_index = index_directory;
			current_depth++;
		}
}
//RM_RF
else if (op == 7) {
int index_previous = -1;
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
		while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) {
			index_previous = index_directory;
			index_directory = fs->directory[index_directory].sibling;
		}
		if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
//clean the file inside
// it is file
if (fs->directory[index_directory].identity == 1) fs_gsys(fs, 2, fs->directory[index_directory].name);
// it is direcoty
// clean the directory
		// unlink the removed entry but keep the rest of the sibling list intact
		if (index_previous != -1) fs->directory[index_previous].sibling = fs->directory[index_directory].sibling;
		else fs->directory[current_index].child = fs->directory[index_directory].sibling;
}
}
| fdd032dd69de0c4b4f651f5cebd5b72d6cb14649.cu | #include "file_system.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define DIR 0
#define FILE 1
using namespace std;
__device__ __managed__ u32 gtime = 1;
__device__ __managed__ u32 gtime_create = 1;
__device__ __managed__ u32 block_position = 0;
__device__ __managed__ u32 FCB_position = 4096;
__device__ __managed__ u32 current_FCB_position = 4096;
__device__ void display(FileSystem* fs) {
printf("this fs////////////////////////////////////////////");
printf("in the zero position\n");
printf("name = %s\n", fs->directory[0].name);
printf("sibling = %d\n", fs->directory[0].sibling);
printf("child = %d\n", fs->directory[0].child);
printf("parent = %d\n", fs->directory[0].parent);
printf("size = %d\n", fs->directory[0].size);
printf("indentity = %d\n", fs->directory[0].identity);
printf("create time = %d\n", fs->directory[0].create_date);
printf("modified time = %d\n", fs->directory[0].modified_date);
printf("\n");
printf("in the one position\n");
printf("name = %s\n", fs->directory[1].name);
printf("sibling = %d\n", fs->directory[1].sibling);
printf("parent = %d\n", fs->directory[1].parent);
printf("child = %d\n", fs->directory[1].child);
printf("size = %d\n", fs->directory[1].size);
printf("indentity = %d\n", fs->directory[1].identity);
printf("create time = %d\n", fs->directory[1].create_date);
printf("modified time = %d\n", fs->directory[1].modified_date);
printf("\n");
printf("in the second position\n");
printf("name = %s\n", fs->directory[2].name);
printf("sibling = %d\n", fs->directory[2].sibling);
printf("parent = %d\n", fs->directory[2].parent);
printf("child = %d\n", fs->directory[2].child);
printf("size = %d\n", fs->directory[2].size);
printf("indentity = %d\n", fs->directory[2].identity);
printf("create time = %d\n", fs->directory[2].create_date);
printf("modified time = %d\n", fs->directory[2].modified_date);
printf("\n");
printf("in the third position\n");
printf("name = %s\n", fs->directory[3].name);
printf("sibling = %d\n", fs->directory[3].sibling);
printf("parent = %d\n", fs->directory[3].parent);
printf("child = %d\n", fs->directory[3].child);
printf("size = %d\n", fs->directory[3].size);
printf("indentity = %d\n", fs->directory[3].identity);
printf("create time = %d\n", fs->directory[3].create_date);
printf("modified time = %d\n", fs->directory[3].modified_date);
}
__device__ void display_valid(FileSystem* fs) {
printf("this is valid fs//////////////////////////////////");
printf("in the zero position\n");
printf("name = %s\n", fs->valid_directory[0].name);
printf("sibling = %d\n", fs->valid_directory[0].sibling);
printf("child = %d\n", fs->valid_directory[0].child);
printf("parent = %d\n", fs->valid_directory[0].parent);
printf("size = %d\n", fs->valid_directory[0].size);
printf("indentity = %d\n", fs->valid_directory[0].identity);
printf("create time = %d\n", fs->valid_directory[0].create_date);
printf("modified time = %d\n", fs->valid_directory[0].modified_date);
printf("\n");
printf("in the one position\n");
printf("name = %s\n", fs->valid_directory[1].name);
printf("sibling = %d\n", fs->valid_directory[1].sibling);
printf("parent = %d\n", fs->valid_directory[1].parent);
printf("child = %d\n", fs->valid_directory[1].child);
printf("size = %d\n", fs->valid_directory[1].size);
printf("indentity = %d\n", fs->valid_directory[1].identity);
printf("create time = %d\n", fs->valid_directory[1].create_date);
printf("modified time = %d\n", fs->valid_directory[1].modified_date);
printf("\n");
printf("in the second position\n");
printf("name = %s\n", fs->valid_directory[2].name);
printf("sibling = %d\n", fs->valid_directory[2].sibling);
printf("parent = %d\n", fs->valid_directory[2].parent);
printf("child = %d\n", fs->valid_directory[2].child);
printf("size = %d\n", fs->valid_directory[2].size);
printf("indentity = %d\n", fs->valid_directory[2].identity);
printf("create time = %d\n", fs->valid_directory[2].create_date);
printf("modified time = %d\n", fs->valid_directory[2].modified_date);
printf("\n");
printf("in the third position\n");
printf("name = %s\n", fs->valid_directory[3].name);
printf("sibling = %d\n", fs->valid_directory[3].sibling);
printf("parent = %d\n", fs->valid_directory[3].parent);
printf("child = %d\n", fs->valid_directory[3].child);
printf("size = %d\n", fs->valid_directory[3].size);
printf("indentity = %d\n", fs->valid_directory[3].identity);
printf("create time = %d\n", fs->valid_directory[3].create_date);
printf("modified time = %d\n", fs->valid_directory[3].modified_date);
}
__device__ void fs_init(FileSystem *fs, uchar *volume, int SUPERBLOCK_SIZE,
int FCB_SIZE, int FCB_ENTRIES, int VOLUME_SIZE,
int STORAGE_BLOCK_SIZE, int MAX_FILENAME_SIZE,
int MAX_FILE_NUM, int MAX_FILE_SIZE, int FILE_BASE_ADDRESS)
{
// init variables
fs->volume = volume;
// init constants
fs->SUPERBLOCK_SIZE = SUPERBLOCK_SIZE;
fs->FCB_SIZE = FCB_SIZE;
fs->FCB_ENTRIES = FCB_ENTRIES;
fs->STORAGE_SIZE = VOLUME_SIZE;
fs->STORAGE_BLOCK_SIZE = STORAGE_BLOCK_SIZE;
fs->MAX_FILENAME_SIZE = MAX_FILENAME_SIZE;
fs->MAX_FILE_NUM = MAX_FILE_NUM;
fs->MAX_FILE_SIZE = MAX_FILE_SIZE;
fs->FILE_BASE_ADDRESS = FILE_BASE_ADDRESS;
}
__device__ __managed__ int current_index = 0;
__device__ __managed__ int last_index = 1;
__device__ __managed__ int current_depth = 0;
__device__ __managed__ int write_index = 0;
__device__ void segment_management_FCB(FileSystem *fs, u32 fp) {
for (int i = fp; i < 36863; i = i + 32) {
if (fs->volume[i + 32] == 0 && fs->volume[i + 32 + 1] == 0 && fs->volume[i + 32 + 2] == 0 && fs->volume[i + 32 + 3] == 0) break;
for (int j = 0; j < 32; j++) {
fs->volume[i + j] = fs->volume[i + j + 32];
fs->volume[i + j + 32] = 0;
}
}
}
__device__ void segment_management(FileSystem *fs, u32 fp, u32 original_size) {
//manage the volume
u32 position = fs->FILE_BASE_ADDRESS + fp * 32;
u32 size = ((original_size - 1) / 32 + 1) * 32;
while ((fs->volume[position + size] != 0 || (position + size) %32 != 0)&& position + original_size < fs->STORAGE_SIZE) {
fs->volume[position] = fs->volume[position + size];
fs->volume[position + size] = 0;
position++;
}
//manage the block
for (int i = 0; i < block_position / 8 + 1; i++) {
fs->volume[i] = 0;
}
block_position = block_position - (original_size - 1) / 32 - 1;
u32 whole_block = block_position / 8;
u32 remainder = block_position % 8;
for (int i = 0; i < whole_block && i < fs->SUPERBLOCK_SIZE ; i++) {
fs->volume[i] = 511;
}
for (int i = 0; i < remainder; i++) {
fs->volume[whole_block] = fs->volume[whole_block] + (1 << i);
}
//change FCB
u32 FCB_block_position;
for (int i = 4096; i < 36863; i = i + 32) {
if (fs->volume[i] == 0 && fs->volume[i + 1] == 0 && fs->volume[i + 2] == 0 && fs->volume[i + 3] == 0) break;
FCB_block_position = (fs->volume[i + 28] << 24) + (fs->volume[i + 29] << 16) + (fs->volume[i + 30] << 8) + (fs->volume[i + 31]);
if (FCB_block_position > fp) {
FCB_block_position = FCB_block_position - (original_size - 1) / 32 - 1;
fs->volume[i + 28] = FCB_block_position >> 24;
fs->volume[i + 29] = FCB_block_position >> 16;
fs->volume[i + 30] = FCB_block_position >> 8;
fs->volume[i + 31] = FCB_block_position;
}
}
}
__device__ void display(FileSystem*fs, u32 stop_position, int op) {
//display date
if (op == 0) {
printf("stop position = %d\n", stop_position);
printf("===sort by modified time===\n");
for (u32 i = 0; i <= stop_position; i++) {
if (fs->valid_directory[i].identity == 0) printf("%s d\n", fs->valid_directory[i].name);
else printf("%s\n",fs->valid_directory[i].name);
}
}
else {
printf("stop position = %d\n", stop_position);
printf("===sort by file size===\n");
for (u32 i = 0; i <= stop_position; i++) {
if (fs->valid_directory[i].identity == 0) printf("%s %d d\n", fs->valid_directory[i].name, fs->valid_directory[i].size);
else printf("%s %d\n", fs->valid_directory[i].name, fs->valid_directory[i].size);
}
}
}
__device__ void swap(FileSystem* fs, int x, int y) {
struct file_directory tempt = fs->valid_directory[x];
fs->valid_directory[x] = fs->valid_directory[y];
fs->valid_directory[y] = tempt;
}
__device__ void bubblesort(FileSystem *fs, u32 left, u32 right, int op) {
// sort by date
if (op == 0) {
for (int i = left; i < right; i ++) {
for (int j = left; j < right - i + left; j++) {
int j_date_previous = fs->valid_directory[j].modified_date;
int j_date_after = fs->valid_directory[j+1].modified_date;
if (j_date_previous < j_date_after) swap(fs, j, j + 1);
}
}
}
else {
for (int i = left; i < right; i++) {
for (int j = left; j < right - i + left; j++) {
int j_size_previous = fs->valid_directory[j].size;
int j_size_after = fs->valid_directory[j + 1].size;
int j_date_previous = fs->valid_directory[j].create_date;
int j_date_after = fs->valid_directory[j + 1].create_date;
if (j_size_previous < j_size_after) swap(fs, j, j + 1);
if (j_size_after == j_size_previous && j_date_previous > j_date_after) swap(fs, j, j + 1);
}
}
}
}
__device__ u32 if_exist(FileSystem *fs, char *s) {
//return FCB position
int flag;
for (int i = 4096; i < 36863; i = i + 32) {
flag = 0;
if (fs->volume[i] == 0 && fs->volume[i + 1] == 0 && fs->volume[i + 2] == 0 && fs->volume[i + 3] == 0) {
break;
}
for (int j = 4; j < 24; j++) {
if (fs->volume[i + j] != s[j - 4]) {
flag = 1;
break;
}
}
if (flag == 1) continue;
if (flag == 0) return i;
}
return -1;
}
__device__ bool check_the_name(char*name1, char*name2) {
for (int i = 0; i < 20; i++) {
if (name1[i] != name2[i]) return true;
}
return false;
}
__device__ int if_exist_directory(FileSystem *fs, char *s) {
if (fs->directory[current_index].child == NULL) return -1;
else {
int directory_index = fs->directory[current_index].child;
while (check_the_name(fs->directory[directory_index].name, s) && fs->directory[directory_index].sibling != NULL) directory_index = fs->directory[directory_index].sibling;
if (check_the_name(fs->directory[directory_index].name, s)) return -directory_index;
return directory_index;
}
}
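// Opens file s in the current directory. op == 0 (read): the file must already exist, otherwise an
// error is reported and -1 is returned. op == 1 (write): a missing file gets a new directory entry
// and FCB; an existing file has its old data and bitmap bits cleared and its modified time updated.
// The returned value is the file's start block in the data region.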
__device__ u32 fs_open(FileSystem *fs, char *s, int op)
{
printf("//////////////////////////////////////////before open\n");
display(fs);
/* Implement open operation here */
//if not exist
int check = if_exist_directory(fs, s);
//printf("check in open is %d\n", check);
if (check < 0) {
printf("file do not exist\n");
if (op == 0) {
printf("can not find the file to read error\n");
return -1;
}
//store in the directory
int name_count = 0;
fs->directory[last_index].child = NULL;
fs->directory[last_index].identity = 1;
for (int i = 0; i < 20 && (i == 0 || s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].parent = current_index;
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].size = 0;
fs->directory[last_index].create_date = gtime_create;
fs->directory[last_index].modified_date = gtime;
if (fs->directory[current_index].child == NULL) fs->directory[current_index].child = last_index;
else fs->directory[-check].sibling = last_index;
fs->directory[current_index].size += name_count;
write_index = last_index;
last_index++;
//implement the volume
//store the name
current_FCB_position = FCB_position;
//printf("for name in open = ");
for (int i = 4; i < 24; i++) {
fs->volume[FCB_position + i] = s[i - 4];
}
//store the create date
fs->volume[FCB_position + 24] = gtime_create >> 8;
fs->volume[FCB_position + 25] = gtime_create;
//store the modified date
fs->volume[FCB_position + 26] = gtime >> 8;
fs->volume[FCB_position + 27] = gtime;
//store the start block
fs->volume[FCB_position + 28] = block_position >> 24;
fs->volume[FCB_position + 29] = block_position >> 16;
fs->volume[FCB_position + 30] = block_position >> 8;
fs->volume[FCB_position + 31] = block_position;
//update the date
gtime++;
gtime_create++;
//update FCB position
FCB_position = FCB_position + 32;
return block_position;
}
//if exist
else {
current_FCB_position = if_exist(fs, s);
u32 start_block = (fs->volume[current_FCB_position + 28] << 24) + (fs->volume[current_FCB_position + 29] << 16) + (fs->volume[current_FCB_position + 30] << 8) + (fs->volume[current_FCB_position + 31]);
//if write
if (op == 1) {
write_index = check;
fs->directory[write_index].modified_date = gtime;
//clean the old file in volume
u32 size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
for (int i = 0; i < size; i++) {
fs->volume[start_block * 32 + i + fs->FILE_BASE_ADDRESS] = 0;
}
//clean the old file in block
for (int i = 0; i < (size - 1) / 32 + 1; i++) {
u32 super_block_position = start_block + i;
int shift_number = super_block_position % 8;
fs->volume[super_block_position / 8] = fs->volume[super_block_position / 8] - (1 << shift_number);
}
//update FCB date
fs->volume[current_FCB_position + 26] = gtime >> 8;
fs->volume[current_FCB_position + 27] = gtime;
//update the date
gtime++;
}
return start_block;
}
}
__device__ void fs_read(FileSystem *fs, uchar *output, u32 size, u32 fp)
{
printf("///////////////////////////////////before read\n");
display(fs);
/* Implement read operation here */
for (int i = 0; i < size; i++) {
output[i] = fs->volume[fp * 32 + i + fs->FILE_BASE_ADDRESS];
}
}
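// Writes size bytes from input to the file whose start block is fp (as returned by fs_open). If the
// last block the new contents would occupy at fp is still free, the file is overwritten in place and
// the volume is compacted when the file shrank; otherwise the data is appended at the current end of
// the data region (block_position), the FCB's size and start block are updated, and the hole left by
// the old copy is compacted away. Returns (u32)-1 when there is no room to relocate the file.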
__device__ u32 fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp)
{
printf("///////////////////////////////////////////before write\n");
display(fs);
/* Implement write operation here */
//enough space
fs->directory[write_index].size = size;
if ((fs->volume[(fp + (size - 1) / 32)/8] >> (fp + (size - 1) / 32) % 8) % 2 == 0) {
u32 old_file_size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
u32 original_size = old_file_size - size;
//update volume
for (int i = 0; i < size; i++) {
fs->volume[fp * 32 + i + fs->FILE_BASE_ADDRESS] = input[i];
//update block
if (i % 32 == 0) {
u32 super_block_position = fp + i / 32;
int shift_number = super_block_position % 8;
fs->volume[(fp + i /32) / 8] = fs->volume[(fp + i / 32) / 8] + (1 << shift_number);
}
}
if (int (original_size) < 0) block_position = block_position + (-original_size - 1) / 32 + 1;
//update size
fs->volume[current_FCB_position] = size >> 24;
fs->volume[current_FCB_position + 1] = size >> 16;
fs->volume[current_FCB_position + 2] = size >> 8;
fs->volume[current_FCB_position + 3] = size;
if (original_size > 0 && old_file_size != 0 && fp != block_position - 1) segment_management(fs, fp + (size - 1) / 32 + 1, original_size);
}
//out of space
else {
u32 original_size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
if (block_position * 32 - 1 + size >= fs->SUPERBLOCK_SIZE) {
return -1;
}
//update volume
else {
for (int i = 0; i < size; i++) {
fs->volume[block_position * 32 + i + fs->FILE_BASE_ADDRESS] = input[i];
//update block
if (i % 32 == 0) {
u32 super_block_position = block_position + i / 32;
int shift_number = super_block_position % 8;
fs->volume[(block_position + i / 32) / 8] = fs->volume[(block_position + i / 32) / 8] + (1 << shift_number);
}
}
//update size
fs->volume[current_FCB_position] = size >> 24;
fs->volume[current_FCB_position + 1] = size >> 16;
fs->volume[current_FCB_position + 2] = size >> 8;
fs->volume[current_FCB_position + 3] = size;
//update block position
fs->volume[current_FCB_position + 28] = block_position >> 24;
fs->volume[current_FCB_position + 29] = block_position >> 16;
fs->volume[current_FCB_position + 30] = block_position >> 8;
fs->volume[current_FCB_position + 31] = block_position;
}
segment_management(fs, fp, original_size);
}
}
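// Despite its name this is not a Morris traversal: it iteratively walks up to three levels of the
// directory tree (the sibling chain starting at root_index, each node's children, and their
// children), copies every visited entry into fs->valid_directory, and returns how many were copied.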
__device__ int morrisTraversal(FileSystem* fs, int root_index) {
int count = 0;
int parent_index = root_index;
while (fs->directory[parent_index].sibling != NULL) {
if (fs->directory[parent_index].child != NULL) {
int child_index = fs->directory[parent_index].child;
while (fs->directory[child_index].sibling != NULL) {
if (fs->directory[child_index].child != NULL) {
int grand_child_index = fs->directory[child_index].child;
while (fs->directory[grand_child_index].sibling != NULL) {
fs->valid_directory[count] = fs->directory[grand_child_index];
printf("count = %d, %d\n", count, grand_child_index);
count++;
grand_child_index = fs->directory[grand_child_index].sibling;
}
fs->valid_directory[count] = fs->directory[grand_child_index];
printf("count = %d, %d\n", count, child_index);
count++;
}
fs->valid_directory[count] = fs->directory[child_index];
printf("count = %d, %d\n", count, child_index);
count++;
child_index = fs->directory[child_index].sibling;
}
fs->valid_directory[count] = fs->directory[child_index];
printf("count = %d, %d\n", count, child_index);
count++;
}
fs->valid_directory[count] = fs->directory[parent_index];
printf("count = %d, %d\n", count, parent_index);
count++;
parent_index = fs->directory[parent_index].sibling;
}
fs->valid_directory[count] = fs->directory[parent_index];
printf("count = %d, %d\n", count, parent_index);
count++;
return count;
}
__device__ void fs_gsys(FileSystem *fs, int op)
{
if (op == 0 || op == 1) {
int count = morrisTraversal(fs, fs->directory[0].child);
printf("/////////////////////after morrisTraversal\n");
display(fs);
display_valid(fs);
printf("///////////////////////////count is %d\n", count);
bubblesort(fs, 0, count - 1, op);
printf("///////////////////////////after bubble sort\n");
display(fs);
display_valid(fs);
display(fs, count - 1, op);
}
// CD_P
else if (op == 6) {
//no parent
if (fs->directory[current_index].parent == NULL) printf("no parent error\n");
else current_index = fs->directory[current_index].parent;
}
//PWD
else if (op == 5) {
int index_directory = current_index;
if (current_depth == 1) printf("/%s\n", fs->directory[current_index].name);
else if (current_depth == 2) printf("/%s/%s\n", fs->directory[fs->directory[current_index].parent].name, fs->directory[current_index].name);
else {
int parent_index = fs->directory[current_index].parent;
char* parent = fs->directory[parent_index].name;
int pp_index = fs->directory[parent_index].parent;
char* parent_parent = fs->directory[pp_index].name;
printf("/%s/%s/%s\n", parent_parent, parent, fs->directory[current_index].name);
}
}
}
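// Two-argument commands: op == 2 -> RM (remove a file in the current directory), op == 3 -> MKDIR,
// op == 4 -> CD, op == 7 -> RM_RF (remove a file, or unlink a directory entry).
// Illustrative call sequence (buffer names are placeholders, not defined here):
// fs_gsys(fs, 3, "app\0"); // MKDIR
// fs_gsys(fs, 4, "app\0"); // CD
// u32 fp = fs_open(fs, "data.bin\0", 1); // open for write
// fs_write(fs, input, 64, fp); // write 64 bytes from a uchar buffer named input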
__device__ void fs_gsys(FileSystem *fs, int op, char *s)
{
if (op == 2) {
int index_previous = -1;
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) {
index_previous = index_directory;
index_directory = fs->directory[index_directory].sibling;
}
if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
//clean the file inside
// it is a file
if (fs->directory[index_directory].identity == 1) {
//rm operation for directory tree
/* Implement rm operation here */
if (if_exist(fs, s) == -1) printf("no such file founded error\n");
else {
current_FCB_position = if_exist(fs, s);
//change volume
u32 start_block = (fs->volume[current_FCB_position + 28] << 24) + (fs->volume[current_FCB_position + 29] << 16) + (fs->volume[current_FCB_position + 30] << 8) + (fs->volume[current_FCB_position + 31]);
//clean the old file in volume
u32 size = (fs->volume[current_FCB_position] << 24) + (fs->volume[current_FCB_position + 1] << 16) + (fs->volume[current_FCB_position + 2] << 8) + (fs->volume[current_FCB_position + 3]);
for (int i = 0; i < size; i++) {
fs->volume[start_block * 32 + i + fs->FILE_BASE_ADDRESS] = 0;
}
//clean the old file in block
for (int i = 0; i < (size - 1) / 32 + 1; i++) {
fs->volume[start_block + i] = 0;
}
//clean the FCB
for (int i = 0; i < 32; i++) {
fs->volume[current_FCB_position + i] = 0;
}
segment_management(fs, start_block, size);
segment_management_FCB(fs, current_FCB_position);
FCB_position = FCB_position - 32;
}
if (index_previous != -1) fs->directory[index_previous].sibling = NULL;
else fs->directory[current_index].child = NULL;
}
else printf("can not use RM to remove the directory\n");
}
else if (op == 3) {
//MKDIR
//for debug
printf("///////////////////////////////////in MKDIR\n");
display(fs);
if (last_index > 1024) printf("file out of storage error\n");
//for debug
printf("last_index is %d\n", last_index);
if (fs->directory[current_index].identity == 1) printf("can not MKDIR in file error\n");
if (current_depth >= 3) printf("file out of depth\n");
int index_directory = fs->directory[current_index].child;
printf("index_directory = %d\n", index_directory);
//no other file
if (index_directory == NULL) {
printf("no other file in MKDIR\n");
int name_count = 0;
for (int i = 0; i < 20 && (i == 0 || s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].child = NULL;
fs->directory[last_index].parent = current_index;
fs->directory[last_index].identity = 0;
fs->directory[last_index].size = 0;
fs->directory[last_index].modified_date = gtime;
fs->directory[last_index].create_date = gtime_create;
fs->directory[current_index].child = last_index;
fs->directory[current_index].size += name_count;
}
//other files
else {
printf("other file\n");
int file_count = 0;
int name_count = 0;
while (fs->directory[index_directory].sibling != NULL) {
index_directory = fs->directory[index_directory].sibling;
file_count++;
}
printf("index_directory = %d\n", index_directory);
if (file_count >= 50) printf("file out of directory storage\n");
for (int i = 0; i < 20 && (i == 0||s[i - 1] != '\0'); i++) {
fs->directory[last_index].name[i] = s[i];
name_count++;
}
fs->directory[last_index].sibling = NULL;
fs->directory[last_index].child = NULL;
fs->directory[last_index].parent = current_index;
fs->directory[index_directory].sibling = last_index;
fs->directory[last_index].identity = 0;
fs->directory[last_index].size = 0;
fs->directory[current_index].size += name_count;
fs->directory[last_index].modified_date = gtime;
fs->directory[last_index].create_date = gtime_create;
}
last_index++;
printf("last index = %d\n", last_index);
printf("\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\after MKDIR\n");
display(fs);
}
else if (op == 4) {
//CD
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) index_directory = fs->directory[index_directory].sibling;
if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
else if (fs->directory[index_directory].identity == 1) printf("can not move into a file\n");
else current_index = index_directory;
current_depth++;
}
//RM_RF
else if (op == 7) {
int index_previous = -1;
if (fs->directory[current_index].child == NULL) printf("no subdirectory error\n");
int index_directory = fs->directory[current_index].child;
while (check_the_name(fs->directory[index_directory].name, s) && fs->directory[index_directory].sibling != NULL) {
index_previous = index_directory;
index_directory = fs->directory[index_directory].sibling;
}
if (check_the_name(fs->directory[index_directory].name, s)) printf("no such directory error\n");
//clean the file inside
// it is a file
if (fs->directory[index_directory].identity == 1) fs_gsys(fs, 2, fs->directory[index_directory].name);
// it is a directory
// clean the directory
if (index_previous != -1) fs->directory[index_previous].sibling = NULL;
else fs->directory[current_index].child = NULL;
}
}
|
b1eabd412442ca8d4c23b9139de6f31f7cc8e282.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "histogram_utils.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/nvidia/cub/cub/warp/warp_scan.cuh>
using namespace cooperative_groups;
namespace NKernel
{
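// Memory layout used throughout this file: a histogram buffer is a dense
// [histId][statId][binFeatureId] array, i.e. each leaf/histogram id owns statCount (numStats)
// contiguous blocks of binFeatureCount (binFeaturesInHist) floats.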
__global__ void CopyHistogramsImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y);
const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y);
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistograms(const ui32* leftLeaves,
const ui32* rightLeaves,
const ui32 leavesCount,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = leavesCount;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
hipLaunchKernelGGL(( CopyHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
__global__ void CopyHistogramImpl(const ui32 leftLeafId,
const ui32 rightLeafId,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistogram(const ui32 leftLeaves,
const ui32 rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = 1;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
hipLaunchKernelGGL(( CopyHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramsImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32* histogramIds,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = blockIdx.y;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstId = histogramIds[blockIdx.y];
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistograms(int blockOffset,
int histBlockSize,
const ui32* histogramIds,
ui32 leafCount,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = leafCount;
numBlocks.z = statCount;
if (histBlockSize && leafCount && statCount) {
hipLaunchKernelGGL(( WriteReducesHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset,
histBlockSize,
histogramIds,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32 dstId,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = 0;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistogram(int blockOffset,
int histBlockSize,
const ui32 histogramId,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (histBlockSize && statCount) {
hipLaunchKernelGGL(( WriteReducesHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset,
histBlockSize,
histogramId,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//zero out the histograms for the given ids
__global__ void ZeroHistogramsImpl(const ui32* histIds,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstHist = histIds[blockIdx.y];
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistograms(const ui32* histIds,
ui32 idsCount,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( ZeroHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histIds,
binFeatureCount,
dstHistogram);
}
}
//zero out a single histogram
__global__ void ZeroHistogramImpl(const ui32 dstHist,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistogram(const ui32 histId,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( ZeroHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histId,
binFeatureCount,
dstHistogram);
}
}
//subtract one set of histograms from another in place
__global__ void SubstractHistogramsImpl(const ui32* fromIds,
const ui32* whatIds,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int fromId = __ldg(fromIds + blockIdx.y);
const int whatId = __ldg(whatIds + blockIdx.y);
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgorams(const ui32* fromIds,
const ui32* whatIds,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( SubstractHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
//subtract one histogram from another in place
__global__ void SubstractHistogramImpl(const ui32 fromId,
const ui32 whatId,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgoram(const ui32 fromIds,
const ui32 whatIds,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( SubstractHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
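// Turns each feature's per-fold histogram into a cumulative (prefix) sum: every 32-thread warp
// owns one feature and scans its folds 32 at a time with cub::WarpScan's InclusiveSum, carrying
// the running total across iterations by broadcasting lane 31's partial sum via cub::ShuffleIndex.
// One-hot features and features with a single fold are skipped.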
template <int BlockSize>
__global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount,
const ui32* histIds,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int histId = histIds[blockIdx.y];
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistograms(
const TBinarizedFeature* features, int fCount,
const ui32* ids,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( ScanHistogramsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, ids, binFeatureCount, histograms);
}
}
template <int BlockSize>
__global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount,
ui32 histId,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistogram(
const TBinarizedFeature* features, int fCount,
ui32 id,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
hipLaunchKernelGGL(( ScanHistogramImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, id, binFeatureCount, histograms);
}
}
}
| b1eabd412442ca8d4c23b9139de6f31f7cc8e282.cu | #include "histogram_utils.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/nvidia/cub/cub/warp/warp_scan.cuh>
using namespace cooperative_groups;
namespace NKernel
{
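// Memory layout used throughout this file: a histogram buffer is a dense
// [histId][statId][binFeatureId] array, i.e. each leaf/histogram id owns statCount (numStats)
// contiguous blocks of binFeatureCount (binFeaturesInHist) floats.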
__global__ void CopyHistogramsImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y);
const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y);
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistograms(const ui32* leftLeaves,
const ui32* rightLeaves,
const ui32 leavesCount,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = leavesCount;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
CopyHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
__global__ void CopyHistogramImpl(const ui32 leftLeafId,
const ui32 rightLeafId,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistogram(const ui32 leftLeaves,
const ui32 rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = 1;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
CopyHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramsImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32* histogramIds,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = blockIdx.y;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstId = histogramIds[blockIdx.y];
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistograms(int blockOffset,
int histBlockSize,
const ui32* histogramIds,
ui32 leafCount,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = leafCount;
numBlocks.z = statCount;
if (histBlockSize && leafCount && statCount) {
WriteReducesHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset,
histBlockSize,
histogramIds,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32 dstId,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = 0;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistogram(int blockOffset,
int histBlockSize,
const ui32 histogramId,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (histBlockSize && statCount) {
WriteReducesHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset,
histBlockSize,
histogramId,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//zero out the histograms for the given ids
__global__ void ZeroHistogramsImpl(const ui32* histIds,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstHist = histIds[blockIdx.y];
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistograms(const ui32* histIds,
ui32 idsCount,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
ZeroHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(histIds,
binFeatureCount,
dstHistogram);
}
}
//zero out a single histogram
__global__ void ZeroHistogramImpl(const ui32 dstHist,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistogram(const ui32 histId,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
ZeroHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(histId,
binFeatureCount,
dstHistogram);
}
}
//subtract one set of histograms from another in place
__global__ void SubstractHistogramsImpl(const ui32* fromIds,
const ui32* whatIds,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int fromId = __ldg(fromIds + blockIdx.y);
const int whatId = __ldg(whatIds + blockIdx.y);
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgorams(const ui32* fromIds,
const ui32* whatIds,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
SubstractHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
//subtract one histogram from another in place
__global__ void SubstractHistogramImpl(const ui32 fromId,
const ui32 whatId,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgoram(const ui32 fromIds,
const ui32 whatIds,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
SubstractHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
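// Turns each feature's per-fold histogram into a cumulative (prefix) sum: every 32-thread warp
// owns one feature and scans its folds 32 at a time with cub::WarpScan's InclusiveSum, carrying
// the running total across iterations by broadcasting lane 31's partial sum via cub::ShuffleIndex.
// One-hot features and features with a single fold are skipped.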
template <int BlockSize>
__global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount,
const ui32* histIds,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int histId = histIds[blockIdx.y];
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistograms(
const TBinarizedFeature* features, int fCount,
const ui32* ids,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
ScanHistogramsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, ids, binFeatureCount, histograms);
}
}
template <int BlockSize>
__global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount,
ui32 histId,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistogram(
const TBinarizedFeature* features, int fCount,
ui32 id,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (!IsGridEmpty(numBlocks)) {
ScanHistogramImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, id, binFeatureCount, histograms);
}
}
}
|
2dbb0e2ce9a7f56510999963555908aa0541b67b.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//rough summing kernel (does not need to be efficient)
__global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) {
int nsum = blockIdx.x*blockDim.x+threadIdx.x;
if(nsum == 0) {
for(int idx = 0; idx < NH+3; idx++){
der_sum[idx]=lPrior[idx];
}
for(int jj = 0; jj < NH+3; jj++){
for(int idx = 0; idx<NT; idx++){
der_sum[jj] += der[jj*NT+idx];
}
}
}
else if(nsum == 1) {
for(int idx = 0; idx < NH+3; idx++) {
for(int idx2 = 0; idx2 < NH+3; idx2++) {
G_sum[idx+idx2*(NH+3)] = 0;
G_sum[idx+idx2*(NH+3)] = gPrior[idx*(NH+3)+idx2];
}
}
for(int jj = 0; jj < NH+3; jj++) {
for(int kk = 0; kk < NH+3; kk++) {
for(int idx =0; idx < NT; idx++) {
G_sum[jj*(NH+3)+kk] -= G[idx+(jj*(NH+3)+kk)*NT];
}
}
}
}
else if(nsum == 2) {
ll_sum[0] = 0;
for(int idx = 0; idx < NT; idx++) {
ll_sum[0] += ll[idx];
}
}
}
//derivatives of firing rate function w.r.t. gamma (assuming fixed latent variables)
__device__ KC_FP_TYPE h(KC_FP_TYPE alpha, KC_FP_TYPE dt, KC_FP_TYPE sh) {
KC_FP_TYPE fr = alpha*KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN)*dt;
return fr;
}
__device__ KC_FP_TYPE dh(KC_FP_TYPE alpha, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE mult) {
KC_FP_TYPE ex = KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN);
return dt*mult*alpha*ex;
}
// computes log p(single trial | gamma, fixed lambdas, spike history)
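// For each trial it accumulates the Poisson log-likelihood of the spike counts, its gradient with
// respect to the NH+3 parameters (ordered [alpha_1, alpha_2, alpha_3, h_1..h_NH], history weights
// with the most recent lag first), and, for the upper triangle kk >= jj, the terms -dt*dr_j*dr_k/r
// (minus the expected information), which kcSumLangevinVars later combines with the prior terms.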
__global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, int * mBlkIdx, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * alphas, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE * zs, KC_FP_TYPE * ss) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
for(int jj = 0; jj<NH+3; jj++){
trialSum[idx+jj*NT]=0;
for(int kk = 0; kk<NH+3; kk++){
trialSumRiemann[idx+(jj*(NH+3)+kk)*NT]=0;
}
}
llSum[idx] = 0;
int stepTime = zs[idx];
int stepState = ss[idx]-1;
for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) {
KC_FP_TYPE alpha;
int currentState = 0;
// check this timing
if( (ii-mBlkIdx[idx]) < stepTime ) {
alpha = alphas[0];
}
else {
alpha = alphas[stepState];
currentState = stepState;
}
KC_FP_TYPE sh = spe[ii];
KC_FP_TYPE r = h(alpha,1,sh);
llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y[ii]+1.0);
for(int jj = 0; jj < NH+3 ; jj++) {
KC_FP_TYPE mult1 = 0;
if(jj < 3 && jj==currentState){
mult1 = 1/alpha;
}
else if(jj > 2 && jj < (NH+3) && ii<(mBlkIdx[idx]+(jj-3)+1)){
mult1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - (jj-3) -1];
}
else if(jj > 2 && jj < (NH+3)){
mult1 = y[ii-(jj-3)-1];
}
KC_FP_TYPE dr = dh(alpha,1,sh,mult1);
trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr;
for(int kk = jj; kk < NH+3; kk++) {
KC_FP_TYPE mult2 = 0;
if(kk < 3 && kk==currentState){
mult2 = 1/alpha;
}
else if(kk > 2 && kk < (NH+3) && ii<(mBlkIdx[idx]+(kk-3)+1)){
mult2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - (kk-3) -1];
}
else if(kk > 2 && kk < (NH+3)){
mult2 = y[ii-(kk-3)-1];
}
KC_FP_TYPE dr2 = dh(alpha,1,sh,mult2);
trialSumRiemann[idx+(NH+3)*NT*jj+NT*kk] += -1*dt*dr*dr2/r;
}
}
}
}
}
// [log_alphah, der_log_alpha, der_log_h] = kcAlphaSpikeHistorySampler(gpu_y,gpu_trIndex,StepSamples.z(:,ss),StepSamples.s(:,ss),timeSeries.delta_t,gpu_spe,StepSamples.alpha(:,ss-1),StepSamples.hs(ss-1,:),der_log_prior_alpha,der_log_prior_h)
// Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable
// as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma
// args
// 0 = y (observations, on GPU)
// 1 = trIdx
// 2 = StepSamples.z(:,ss) (switch times)
// 3 = StepSamples.s(:,ss) (switch states)
// 4 = dt (bin size in seconds)
// 5 = spe (spike history effect, TT x 1)
// 6 = alphas
// 7 = hs
// 8 = der log prior of alpha and hs
// 9 = fisher information of log prior of alpha and hs
// 10 = spike history (spikes before start of trials, NH*NT x 1)
//
//outputs (left-hand side)
// 0 = log p(y|alphas,hs)
// 1 = d/dg log p(y|alphas,hs)
// 2 = d^2/d^2g log p(y|alphas,hs)
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
hipError_t ce;
//loads up trial information
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0],TT);
int * trIdx = kcGetArrayDataInt(prhs[1]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
// load StepSamples.z
KC_FP_TYPE * zs;
checkCudaErrors(hipMalloc((void**)&zs,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(hipMemcpy(zs,(KC_FP_TYPE*)mxGetPr(prhs[2]),sizeof(KC_FP_TYPE)*NT,hipMemcpyHostToDevice));
// load StepSamples.s
KC_FP_TYPE * ss;
checkCudaErrors(hipMalloc((void**)&ss,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(hipMemcpy(ss,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*NT,hipMemcpyHostToDevice));
// load dt
KC_FP_TYPE dt = mxGetScalar(prhs[4]);
// load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT);
// load alphas
KC_FP_TYPE * alphas;
checkCudaErrors(hipMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3));
checkCudaErrors(hipMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[6]),sizeof(KC_FP_TYPE)*3,hipMemcpyHostToDevice));
// load filter weights
int NH = mxGetNumberOfElements(prhs[7]);
KC_FP_TYPE * h_filt;
checkCudaErrors(hipMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH));
checkCudaErrors(hipMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*NH,hipMemcpyHostToDevice));
// load derivative of log prior
KC_FP_TYPE * l_prior;
checkCudaErrors(hipMalloc((void**)&l_prior,sizeof(KC_FP_TYPE)*(NH+3)));
checkCudaErrors(hipMemcpy(l_prior,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*(NH+3),hipMemcpyHostToDevice));
// load fisher information of log prior
KC_FP_TYPE * g_prior;
checkCudaErrors(hipMalloc((void**)&g_prior,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3)));
checkCudaErrors(hipMemcpy(g_prior,(KC_FP_TYPE*)mxGetPr(prhs[9]),sizeof(KC_FP_TYPE)*(NH+3)*(NH+3),hipMemcpyHostToDevice));
//loads spike history before trials
KC_FP_TYPE * y_hist = kcGetArrayData(prhs[10],NH*NT);
//sets up space for computations on GPU
KC_FP_TYPE * der_log_p_y;
checkCudaErrors(hipMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+3)));
KC_FP_TYPE * der_log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*1));
KC_FP_TYPE * log_p_y;
checkCudaErrors(hipMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1));
KC_FP_TYPE * G_log_p_y1;
checkCudaErrors(hipMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+3)*(NH+3)));
KC_FP_TYPE * G_log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3)));
//sets up CUDA variables
int blockSize = 2;
int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1);
//gets each trial's likelihood + derivatives of the filter
hipLaunchKernelGGL(( kcBoundaryLikelihoodTrialHist), dim3(numBlocks),dim3(blockSize) , 0, 0, y,spe,trIdx,dt,NT,log_p_y,der_log_p_y,G_log_p_y1,alphas,h_filt,y_hist,NH,zs,ss);
checkCudaErrors(hipDeviceSynchronize());
//sums up all the trials' likelihoods and derivatives with respect to alpha and spike history filters
int nBlocksC = 3;
int blockSizeC = 1;
hipLaunchKernelGGL(( kcSumLangevinVars) , dim3(nBlocksC),dim3(blockSizeC) , 0, 0, der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, g_prior, l_prior);
checkCudaErrors(hipDeviceSynchronize());
//pushes answers back to MATLAB
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,hipMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(NH+3,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(1),hipMemcpyDeviceToHost));
}
if(nlhs > 2) {
plhs[2] = mxCreateNumericMatrix(NH+3,NH+3,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3),hipMemcpyDeviceToHost));
}
//clears up GPU variables
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(log_p_y));
checkCudaErrors(hipFree(log_p_y_sum));
checkCudaErrors(hipFree(der_log_p_y));
checkCudaErrors(hipFree(der_log_p_y_sum));
checkCudaErrors(hipFree(G_log_p_y1));
checkCudaErrors(hipFree(G_log_p_y_sum));
checkCudaErrors(hipFree(h_filt));
checkCudaErrors(hipFree(g_prior));
checkCudaErrors(hipFree(l_prior));
checkCudaErrors(hipFree(zs));
checkCudaErrors(hipFree(ss));
checkCudaErrors(hipFree(alphas));
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error at the end of kcLangevinStep.cu ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
| 2dbb0e2ce9a7f56510999963555908aa0541b67b.cu |
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//rough summing kernel (does not need to be efficient)
__global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) {
int nsum = blockIdx.x*blockDim.x+threadIdx.x;
if(nsum == 0) {
for(int idx = 0; idx < NH+3; idx++){
der_sum[idx]=lPrior[idx];
}
for(int jj = 0; jj < NH+3; jj++){
for(int idx = 0; idx<NT; idx++){
der_sum[jj] += der[jj*NT+idx];
}
}
}
else if(nsum == 1) {
for(int idx = 0; idx < NH+3; idx++) {
for(int idx2 = 0; idx2 < NH+3; idx2++) {
G_sum[idx+idx2*(NH+3)] = 0;
G_sum[idx+idx2*(NH+3)] = gPrior[idx*(NH+3)+idx2];
}
}
for(int jj = 0; jj < NH+3; jj++) {
for(int kk = 0; kk < NH+3; kk++) {
for(int idx =0; idx < NT; idx++) {
G_sum[jj*(NH+3)+kk] -= G[idx+(jj*(NH+3)+kk)*NT];
}
}
}
}
else if(nsum == 2) {
ll_sum[0] = 0;
for(int idx = 0; idx < NT; idx++) {
ll_sum[0] += ll[idx];
}
}
}
//derivatives of firing rate function w.r.t. gamma (assuming fixed latent variables)
__device__ KC_FP_TYPE h(KC_FP_TYPE alpha, KC_FP_TYPE dt, KC_FP_TYPE sh) {
KC_FP_TYPE fr = alpha*KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN)*dt;
return fr;
}
__device__ KC_FP_TYPE dh(KC_FP_TYPE alpha, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE mult) {
KC_FP_TYPE ex = KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN);
return dt*mult*alpha*ex;
}
// computes log p(single trial | gamma, fixed lambdas, spike history)
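// For each trial it accumulates the Poisson log-likelihood of the spike counts, its gradient with
// respect to the NH+3 parameters (ordered [alpha_1, alpha_2, alpha_3, h_1..h_NH], history weights
// with the most recent lag first), and, for the upper triangle kk >= jj, the terms -dt*dr_j*dr_k/r
// (minus the expected information), which kcSumLangevinVars later combines with the prior terms.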
__global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, int * mBlkIdx, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * alphas, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE * zs, KC_FP_TYPE * ss) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
for(int jj = 0; jj<NH+3; jj++){
trialSum[idx+jj*NT]=0;
for(int kk = 0; kk<NH+3; kk++){
trialSumRiemann[idx+(jj*(NH+3)+kk)*NT]=0;
}
}
llSum[idx] = 0;
int stepTime = zs[idx];
int stepState = ss[idx]-1;
for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) {
KC_FP_TYPE alpha;
int currentState = 0;
// check this timing
if( (ii-mBlkIdx[idx]) < stepTime ) {
alpha = alphas[0];
}
else {
alpha = alphas[stepState];
currentState = stepState;
}
KC_FP_TYPE sh = spe[ii];
KC_FP_TYPE r = h(alpha,1,sh);
llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y[ii]+1.0);
for(int jj = 0; jj < NH+3 ; jj++) {
KC_FP_TYPE mult1 = 0;
if(jj < 3 && jj==currentState){
mult1 = 1/alpha;
}
else if(jj > 2 && jj < (NH+3) && ii<(mBlkIdx[idx]+(jj-3)+1)){
mult1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - (jj-3) -1];
}
else if(jj > 2 && jj < (NH+3)){
mult1 = y[ii-(jj-3)-1];
}
KC_FP_TYPE dr = dh(alpha,1,sh,mult1);
trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr;
for(int kk = jj; kk < NH+3; kk++) {
KC_FP_TYPE mult2 = 0;
if(kk < 3 && kk==currentState){
mult2 = 1/alpha;
}
else if(kk > 2 && kk < (NH+3) && ii<(mBlkIdx[idx]+(kk-3)+1)){
mult2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - (kk-3) -1];
}
else if(kk > 2 && kk < (NH+3)){
mult2 = y[ii-(kk-3)-1];
}
KC_FP_TYPE dr2 = dh(alpha,1,sh,mult2);
trialSumRiemann[idx+(NH+3)*NT*jj+NT*kk] += -1*dt*dr*dr2/r;
}
}
}
}
}
// [log_alphah, der_log_alpha, der_log_h] = kcAlphaSpikeHistorySampler(gpu_y,gpu_trIndex,StepSamples.z(:,ss),StepSamples.s(:,ss),timeSeries.delta_t,gpu_spe,StepSamples.alpha(:,ss-1),StepSamples.hs(ss-1,:),der_log_prior_alpha,der_log_prior_h)
// Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable
// as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma
// args
// 0 = y (observations, on GPU)
// 1 = trIdx
// 2 = StepSamples.z(:,ss) (switch times)
// 3 = StepSamples.s(:,ss) (switch states)
// 4 = dt (bin size in seconds)
// 5 = spe (spike history effect, TT x 1)
// 6 = alphas
// 7 = hs
// 8 = der log prior of alpha and hs
// 9 = fisher information of log prior of alpha and hs
// 10 = spike history (spikes before start of trials, NH*NT x 1)
//
//outputs (left-hand side)
// 0 = log p(y|alphas,hs)
// 1 = d/dg log p(y|alphas,hs)
// 2 = d^2/d^2g log p(y|alphas,hs)
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
cudaError_t ce;
//loads up trial information
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0],TT);
int * trIdx = kcGetArrayDataInt(prhs[1]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
// load StepSamples.z
KC_FP_TYPE * zs;
checkCudaErrors(cudaMalloc((void**)&zs,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(cudaMemcpy(zs,(KC_FP_TYPE*)mxGetPr(prhs[2]),sizeof(KC_FP_TYPE)*NT,cudaMemcpyHostToDevice));
// load StepSamples.s
KC_FP_TYPE * ss;
checkCudaErrors(cudaMalloc((void**)&ss,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(cudaMemcpy(ss,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*NT,cudaMemcpyHostToDevice));
// load dt
KC_FP_TYPE dt = mxGetScalar(prhs[4]);
// load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT);
// load alphas
KC_FP_TYPE * alphas;
checkCudaErrors(cudaMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3));
checkCudaErrors(cudaMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[6]),sizeof(KC_FP_TYPE)*3,cudaMemcpyHostToDevice));
// load filter weights
int NH = mxGetNumberOfElements(prhs[7]);
KC_FP_TYPE * h_filt;
checkCudaErrors(cudaMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH));
checkCudaErrors(cudaMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*NH,cudaMemcpyHostToDevice));
// load derivative of log prior
KC_FP_TYPE * l_prior;
checkCudaErrors(cudaMalloc((void**)&l_prior,sizeof(KC_FP_TYPE)*(NH+3)));
checkCudaErrors(cudaMemcpy(l_prior,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*(NH+3),cudaMemcpyHostToDevice));
// load fisher information of log prior
KC_FP_TYPE * g_prior;
checkCudaErrors(cudaMalloc((void**)&g_prior,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3)));
checkCudaErrors(cudaMemcpy(g_prior,(KC_FP_TYPE*)mxGetPr(prhs[9]),sizeof(KC_FP_TYPE)*(NH+3)*(NH+3),cudaMemcpyHostToDevice));
//loads spike history before trials
KC_FP_TYPE * y_hist = kcGetArrayData(prhs[10],NH*NT);
//sets up space for computations on GPU
KC_FP_TYPE * der_log_p_y;
checkCudaErrors(cudaMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+3)));
KC_FP_TYPE * der_log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*1));
KC_FP_TYPE * log_p_y;
checkCudaErrors(cudaMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1));
KC_FP_TYPE * G_log_p_y1;
checkCudaErrors(cudaMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+3)*(NH+3)));
KC_FP_TYPE * G_log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3)));
//sets up CUDA variables
int blockSize = 2;
int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1);
//gets each trial's likelihood + derivatives w.r.t. alpha and the spike-history filter
kcBoundaryLikelihoodTrialHist<<< numBlocks,blockSize >>>(y,spe,trIdx,dt,NT,log_p_y,der_log_p_y,G_log_p_y1,alphas,h_filt,y_hist,NH,zs,ss);
checkCudaErrors(cudaDeviceSynchronize());
//sums up all the trials' likelihoods and derivatives with respect to alpha and spike history filters
int nBlocksC = 3;
int blockSizeC = 1;
kcSumLangevinVars <<< nBlocksC,blockSizeC >>> (der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, g_prior, l_prior);
checkCudaErrors(cudaDeviceSynchronize());
//pushes answers back to MATLAB
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,cudaMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(NH+3,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(1),cudaMemcpyDeviceToHost));
}
if(nlhs > 2) {
plhs[2] = mxCreateNumericMatrix(NH+3,NH+3,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+3)*(NH+3),cudaMemcpyDeviceToHost));
}
//clears up GPU variables
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(log_p_y));
checkCudaErrors(cudaFree(log_p_y_sum));
checkCudaErrors(cudaFree(der_log_p_y));
checkCudaErrors(cudaFree(der_log_p_y_sum));
checkCudaErrors(cudaFree(G_log_p_y1));
checkCudaErrors(cudaFree(G_log_p_y_sum));
checkCudaErrors(cudaFree(h_filt));
checkCudaErrors(cudaFree(g_prior));
checkCudaErrors(cudaFree(l_prior));
checkCudaErrors(cudaFree(zs));
checkCudaErrors(cudaFree(ss));
checkCudaErrors(cudaFree(alphas));
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error at the end of kcLangevinStep.cu ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
|
f27e53746552e27e08bf64b7b1fa122956d56dea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "A_for_lightning_estimation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
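// This harness sweeps every block shape in blocks_ against the first argv[1] entries of
// matrices_, does 10 warm-up launches, then times 1000 launches per configuration.
// Hypothetical invocation covering all seven matrix sizes (binary name illustrative):
// ./a_for_lightning_bench 7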
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *rho = NULL;
hipMalloc(&rho, XSIZE*YSIZE);
float *N = NULL;
hipMalloc(&N, XSIZE*YSIZE);
int npix = 1;
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
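    // Note: these allocations are sized in raw bytes (XSIZE*YSIZE), not
    // XSIZE*YSIZE*sizeof(float); whether that is sufficient depends on the
    // A_for_lightning_estimation kernel, which is not shown in this file.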
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
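    // The two loops above round the sizes up to multiples of the block dimensions; an
    // equivalent closed form (sketch) would be:
    // iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX;
    // iYSIZE = ((YSIZE + BLOCKY - 1) / BLOCKY) * BLOCKY;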
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL(A_for_lightning_estimation, dim3(gridBlock), dim3(threadBlock), 0, 0, rho, N, npix, A);
    hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(A_for_lightning_estimation, dim3(gridBlock), dim3(threadBlock), 0, 0, rho, N, npix, A);
    }
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(A_for_lightning_estimation, dim3(gridBlock), dim3(threadBlock), 0, 0, rho, N, npix, A);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f27e53746552e27e08bf64b7b1fa122956d56dea.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "A_for_lightning_estimation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *rho = NULL;
cudaMalloc(&rho, XSIZE*YSIZE);
float *N = NULL;
cudaMalloc(&N, XSIZE*YSIZE);
int npix = 1;
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
A_for_lightning_estimation<<<gridBlock,threadBlock>>>(rho,N,npix,A);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
A_for_lightning_estimation<<<gridBlock,threadBlock>>>(rho,N,npix,A);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
A_for_lightning_estimation<<<gridBlock,threadBlock>>>(rho,N,npix,A);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f09f4f1f51eabb3fa05017079064d862170b5ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <random>
#include <stdio.h>
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void sortKernel(int *c, const int *a)
{
int i = threadIdx.x;
	if (i > 0 && a[i] < a[i - 1]) // guard i == 0 so we never read a[-1]
{
c[i] = a[i - 1];
c[i - 1] = a[i];
}
}
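// Note: this kernel performs a single neighbour compare-exchange per thread rather than a full
// sort, and elements that are not swapped are never written to c. A complete sort would need
// e.g. "size" alternating odd/even passes (odd-even transposition) or a library sort such as
// thrust::sort; the wrapper below only sketches the launch pattern.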
// Helper function that uses CUDA to run a single parallel compare-exchange pass over a vector.
void sortWithCuda(int *c, const int *a, unsigned int size)
{
int *dev_a = 0;
int *dev_c = 0;
hipSetDevice(0);
hipMalloc((void**)&dev_c, size* sizeof(int));
hipMalloc((void**)&dev_a, size* sizeof(int));
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
sortKernel << <2, size >> >(dev_c, dev_a);
hipDeviceSynchronize();
	hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
	// release the device buffers so repeated calls do not leak memory
	hipFree(dev_c);
	hipFree(dev_a);
}
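// Example host-side call (illustrative values):
// int a[4] = { 3, 1, 4, 2 };
// int c[4] = { 0, 0, 0, 0 };
// sortWithCuda(c, a, 4); // c receives the result of one compare-exchange pass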
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| f09f4f1f51eabb3fa05017079064d862170b5ac8.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <random>
#include <stdio.h>
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void sortKernel(int *c, const int *a)
{
int i = threadIdx.x;
	if (i > 0 && a[i] < a[i - 1]) // guard i == 0 so we never read a[-1]
{
c[i] = a[i - 1];
c[i - 1] = a[i];
}
}
// Helper function that uses CUDA to run a single parallel compare-exchange pass over a vector.
void sortWithCuda(int *c, const int *a, unsigned int size)
{
int *dev_a = 0;
int *dev_c = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dev_c, size* sizeof(int));
cudaMalloc((void**)&dev_a, size* sizeof(int));
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
sortKernel << <2, size >> >(dev_c, dev_a);
cudaDeviceSynchronize();
	cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
	// release the device buffers so repeated calls do not leak memory
	cudaFree(dev_c);
	cudaFree(dev_a);
}
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
5696fb99cee8cb0b79354a785f02ff6ee951f2e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a hipDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8)
{
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
}
cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8)
{
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
}
cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16)
{
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
}
cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
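// Layout of one CUPRINTF_MAX_LEN record as built by the code below (sketch):
// [cuPrintfHeader][len|arg1 (padded)][len|arg2 (padded)]...[len|format string (padded)]
// Every length field and value is rounded up to CUPRINTF_ALIGN_SIZE, and header.fmtoffset
// points at the trailing format-string block.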
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if (!printfBufferPtr)
{
return NULL;
}
// Thread/block restriction check
if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
{
return NULL;
}
if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
{
return NULL;
}
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if (thread_buf_len < (CUPRINTF_MAX_LEN * 2))
{
return NULL;
}
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if (offset >= hdr.thread_buf_len)
{
offset = CUPRINTF_MAX_LEN;
}
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if (ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if (!dest || !src || (dest >= end))
{
return NULL;
}
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while (n--)
{
if (dest >= end) // Overflow check
{
break;
}
len++;
*dest++ = *src;
if (*src++ == '\0')
{
break;
}
}
// Now write out the padding bytes, and we have our length.
while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if (!ptr || !arg)
{
return NULL;
}
// strncpy does all our work. We just terminate.
if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
{
*ptr = 0;
}
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
{
return NULL;
}
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function).
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
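// A call such as cuPrintf("x=%d y=%f\n", xi, yf) therefore expands to: PREAMBLE (reserve one
// CUPRINTF_MAX_LEN slot), one CUPRINTF_ARG per argument (length + value), then POSTAMBLE
// (copy the format string and finally publish the header so the host can read the entry).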
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
{
restrictRules.threadid = threadid;
}
int block_count = gridDim.x * gridDim.y;
if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
{
restrictRules.blockid = blockid;
}
}
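// Example device-side use (illustrative): restrict output to thread 0 of every block with
// cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
// cuPrintf("only thread 0 of each block writes to the buffer\n");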
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while (p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if (*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if (arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch (specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if (arglen == 4) // Float vs. Double thing
{
fprintf(printf_fp, format, *((float *)data));
}
else
{
fprintf(printf_fp, format, *((double *)data));
}
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while (bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if (bufptr == bufend)
{
bufptr = bufstart;
}
// Adjust our start pointer to within the circular buffer and copy a block.
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if (headings)
{
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
}
if (hdr->fmtoffset == 0)
{
fprintf(printf_fp, "printf buffer overflow\n");
}
else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
{
break;
}
printf_count++;
// Clear if asked
if (clear)
{
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
}
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if ((bufferLen % CUPRINTF_MAX_LEN) > 0)
{
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
}
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if (hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
{
return hipErrorInitializationError;
}
hipMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return hipSuccess;
}
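// Note: bufferLen is rounded up to a multiple of CUPRINTF_MAX_LEN (256 bytes), so for example
// cudaPrintfInit(1000) actually allocates 1024 bytes on the device.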
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if (!printfbuf_start || !printfbuf_device)
{
return;
}
hipFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if (!printfbuf_start || !printfbuf_device || !printf_fp)
{
return hipErrorMissingConfiguration;
}
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if (magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while (blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if (hdr.thread_buf_len != 0)
{
blocklen = hdr.thread_buf_len;
}
// No magic number means no printfs from this thread
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
if (blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if (hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if (magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if (sync_printfs)
{
hipMemset(printfbuf_device, 0, printfbuf_len);
}
return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| 5696fb99cee8cb0b79354a785f02ff6ee951f2e3.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cudaDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8)
{
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
}
cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8)
{
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
}
cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16)
{
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
}
cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if (!printfBufferPtr)
{
return NULL;
}
// Thread/block restriction check
if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
{
return NULL;
}
if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
{
return NULL;
}
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if (thread_buf_len < (CUPRINTF_MAX_LEN * 2))
{
return NULL;
}
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if (offset >= hdr.thread_buf_len)
{
offset = CUPRINTF_MAX_LEN;
}
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if (ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if (!dest || !src || (dest >= end))
{
return NULL;
}
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while (n--)
{
if (dest >= end) // Overflow check
{
break;
}
len++;
*dest++ = *src;
if (*src++ == '\0')
{
break;
}
}
// Now write out the padding bytes, and we have our length.
while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if (!ptr || !arg)
{
return NULL;
}
// strncpy does all our work. We just terminate.
if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
{
*ptr = 0;
}
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
{
return NULL;
}
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function).
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
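//
// Example device-side usage (an illustrative sketch only; the kernel name and
// values are made up). As the templates above show, up to 10 arguments are
// supported and each argument is copied by value into the circular buffer:
//
//   __global__ void myKernel()
//   {
//       cuPrintf("block %d, thread %d: x=%f\n", blockIdx.x, threadIdx.x, 1.5f);
//   }
//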
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
{
restrictRules.threadid = threadid;
}
int block_count = gridDim.x * gridDim.y;
if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
{
restrictRules.blockid = blockid;
}
}
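//
// Example (illustrative): restrict output to thread 0 of block 0 from inside a
// kernel, or lift the restriction again with CUPRINTF_UNRESTRICTED:
//
//   cuPrintfRestrict(0, 0); // only thread 0 of block 0 prints
//   cuPrintfRestrict(CUPRINTF_UNRESTRICTED, CUPRINTF_UNRESTRICTED); // everyone prints
//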
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while (p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if (*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if (arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch (specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if (arglen == 4) // Float vs. Double thing
{
fprintf(printf_fp, format, *((float *)data));
}
else
{
fprintf(printf_fp, format, *((double *)data));
}
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
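//
// Worked example (illustrative): for fmt = "x=%d y=%5.2f\n" and a data buffer
// holding [len][3][len][1.5f], the loop above emits "x=" with fputs, prints the
// carved-out "%d" with 3, emits " y=", prints "%5.2f" with 1.5, and the final
// fputs writes the trailing "\n".
//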
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while (bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if (bufptr == bufend)
{
bufptr = bufstart;
}
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if (headings)
{
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
}
if (hdr->fmtoffset == 0)
{
fprintf(printf_fp, "printf buffer overflow\n");
}
else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
{
break;
}
printf_count++;
// Clear if asked
if (clear)
{
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
}
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device, zeroes
// it and points the device-side globals at it. Call cudaPrintfEnd() to free it.
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if ((bufferLen % CUPRINTF_MAX_LEN) > 0)
{
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
}
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if (cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
{
return cudaErrorInitializationError;
}
cudaMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return cudaSuccess;
}
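//
// Typical host-side flow using the functions in this section (a sketch;
// "myKernel" and the 1MB buffer size are arbitrary placeholders):
//
//   cudaPrintfInit(1 << 20);              // allocate and hook up the device buffer
//   myKernel<<<grid, block>>>(...);       // kernel calls cuPrintf() internally
//   cudaPrintfDisplay(stdout, true);      // drain the buffer, prefixing [block, thread]
//   cudaPrintfEnd();                      // free the buffer
//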
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if (!printfbuf_start || !printfbuf_device)
{
return;
}
cudaFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - FILE pointer to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if (!printfbuf_start || !printfbuf_device || !printf_fp)
{
return cudaErrorMissingConfiguration;
}
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if (magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while (blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if (hdr.thread_buf_len != 0)
{
blocklen = hdr.thread_buf_len;
}
// No magic number means no printfs from this thread
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
if (blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if (hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if (magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if (sync_printfs)
{
cudaMemset(printfbuf_device, 0, printfbuf_len);
}
return cudaSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
df06567fb972f3ef8dafea4e1ad5d586b8bcff6a.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> c d s
*/
#include <hip/hip_runtime_api.h>
#include <rocblas.h> // include before magma.h
#include <fstream>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#include <ostream>
#include <assert.h>
#include <stdio.h>
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
zbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
kblocks magma_int_t
number of blocks
@param
dA magmaDoubleComplex**
matrix in BCSR
@param
ipiv magma_int_t*
array containing pivots
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc( magma_int_t size_b,
magma_int_t kblocks,
magmaDoubleComplex **dA,
magma_int_t *ipiv ){
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
hipLaunchKernelGGL(( zbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
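// Usage sketch (hypothetical values; dA_array and d_ipiv are assumed to be
// device-resident, with dA_array holding one pointer per BCSR block):
//
//   magma_zbcsrlupivloc( size_b, kblocks, dA_array, d_ipiv );
//
// Note that the launch above is guarded by "#if defined(PRECISION_d)", so with
// PRECISION_z defined this routine currently returns without doing any work.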
| df06567fb972f3ef8dafea4e1ad5d586b8bcff6a.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> c d s
*/
#include <cuda_runtime_api.h>
#include <cublas_v2.h> // include before magma.h
#include <fstream>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#include <ostream>
#include <assert.h>
#include <stdio.h>
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
zbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
kblocks magma_int_t
number of blocks
@param
dA magmaDoubleComplex**
matrix in BCSR
@param
ipiv magma_int_t*
array containing pivots
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc( magma_int_t size_b,
magma_int_t kblocks,
magmaDoubleComplex **dA,
magma_int_t *ipiv ){
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
zbcsrlupivloc_kernel<<< grid, threads, 0, magma_stream >>>(
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
|
f87df6f1ab4373d4b0b8e4b4fc14fe25c7eca4a1.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
glm::vec3* framebuffer;
fragment* depthbuffer;
float* device_vbo;
float* device_cbo;
int* device_ibo;
triangle* primitives;
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Handy dandy little hashing function that provides seeds for random number generation
__host__ __device__ unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
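// Example use (illustrative; nothing in this file calls it yet): seed a per-thread RNG,
//   thrust::default_random_engine rng(hash(index));
//   thrust::uniform_real_distribution<float> u01(0, 1);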
//Writes a given fragment to a fragment buffer at a given location
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
depthbuffer[index] = frag;
}
}
//Reads a fragment from a given location in a fragment buffer
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return depthbuffer[index];
}else{
fragment f;
return f;
}
}
//Writes a given pixel to a pixel buffer at a given location
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
framebuffer[index] = value;
}
}
//Reads a pixel from a pixel buffer at a given location
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return framebuffer[index];
}else{
return glm::vec3(0,0,0);
}
}
//Kernel that clears a given pixel buffer with a given color
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = color;
}
}
//Kernel that clears a given fragment buffer with a given fragment
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
fragment f = frag;
f.position.x = x;
f.position.y = y;
buffer[index] = f;
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: Implement a vertex shader
__global__ void vertexShadeKernel(float* vbo, int vbosize){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<vbosize/3){
}
}
//TODO: Implement primitive assembly
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, triangle* primitives){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int primitivesCount = ibosize/3;
if(index<primitivesCount){
}
}
//TODO: Implement a rasterization method, such as scanline.
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<primitivesCount){
}
}
//TODO: Implement a fragment shader
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
}
}
//Writes fragment colors to the framebuffer
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
framebuffer[index] = depthbuffer[index].color;
}
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize){
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
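// e.g. an 800x600 resolution with tileSize = 8 gives a 100 x 75 grid of 8x8-thread blocks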
//set up framebuffer
framebuffer = NULL;
hipMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3));
//set up depthbuffer
depthbuffer = NULL;
hipMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment));
//kernel launches to black out accumulated/unaccumulated pixel buffers and clear our scattering states
hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0));
fragment frag;
frag.color = glm::vec3(0,0,0);
frag.normal = glm::vec3(0,0,0);
frag.position = glm::vec3(0,0,-10000);
hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer,frag);
//------------------------------
//memory stuff
//------------------------------
primitives = NULL;
hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
device_ibo = NULL;
hipMalloc((void**)&device_ibo, ibosize*sizeof(int));
hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice);
device_vbo = NULL;
hipMalloc((void**)&device_vbo, vbosize*sizeof(float));
hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice);
device_cbo = NULL;
hipMalloc((void**)&device_cbo, cbosize*sizeof(float));
hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice);
tileSize = 32;
int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
//------------------------------
//vertex shader
//------------------------------
hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize);
hipDeviceSynchronize();
//------------------------------
//primitive assembly
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, primitives);
hipDeviceSynchronize();
//------------------------------
//rasterization
//------------------------------
hipLaunchKernelGGL(( rasterizationKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, ibosize/3, depthbuffer, resolution);
hipDeviceSynchronize();
//------------------------------
//fragment shader
//------------------------------
hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, depthbuffer, resolution);
hipDeviceSynchronize();
//------------------------------
//write fragments to framebuffer
//------------------------------
hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer, framebuffer);
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, resolution, framebuffer);
hipDeviceSynchronize();
kernelCleanup();
checkCUDAError("Kernel failed!");
}
void kernelCleanup(){
hipFree( primitives );
hipFree( device_vbo );
hipFree( device_cbo );
hipFree( device_ibo );
hipFree( framebuffer );
hipFree( depthbuffer );
}
| f87df6f1ab4373d4b0b8e4b4fc14fe25c7eca4a1.cu | // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
glm::vec3* framebuffer;
fragment* depthbuffer;
float* device_vbo;
float* device_cbo;
int* device_ibo;
triangle* primitives;
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Handy dandy little hashing function that provides seeds for random number generation
__host__ __device__ unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Writes a given fragment to a fragment buffer at a given location
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
depthbuffer[index] = frag;
}
}
//Reads a fragment from a given location in a fragment buffer
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return depthbuffer[index];
}else{
fragment f;
return f;
}
}
//Writes a given pixel to a pixel buffer at a given location
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
framebuffer[index] = value;
}
}
//Reads a pixel from a pixel buffer at a given location
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return framebuffer[index];
}else{
return glm::vec3(0,0,0);
}
}
//Kernel that clears a given pixel buffer with a given color
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = color;
}
}
//Kernel that clears a given fragment buffer with a given fragment
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
fragment f = frag;
f.position.x = x;
f.position.y = y;
buffer[index] = f;
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: Implement a vertex shader
__global__ void vertexShadeKernel(float* vbo, int vbosize){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<vbosize/3){
}
}
//TODO: Implement primitive assembly
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, triangle* primitives){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int primitivesCount = ibosize/3;
if(index<primitivesCount){
}
}
//TODO: Implement a rasterization method, such as scanline.
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<primitivesCount){
}
}
//TODO: Implement a fragment shader
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
}
}
//Writes fragment colors to the framebuffer
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
framebuffer[index] = depthbuffer[index].color;
}
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize){
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
//set up framebuffer
framebuffer = NULL;
cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3));
//set up depthbuffer
depthbuffer = NULL;
cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment));
//kernel launches to black out accumulated/unaccumulated pixel buffers and clear our scattering states
clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0));
fragment frag;
frag.color = glm::vec3(0,0,0);
frag.normal = glm::vec3(0,0,0);
frag.position = glm::vec3(0,0,-10000);
clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer,frag);
//------------------------------
//memory stuff
//------------------------------
primitives = NULL;
cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
device_ibo = NULL;
cudaMalloc((void**)&device_ibo, ibosize*sizeof(int));
cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);
device_vbo = NULL;
cudaMalloc((void**)&device_vbo, vbosize*sizeof(float));
cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice);
device_cbo = NULL;
cudaMalloc((void**)&device_cbo, cbosize*sizeof(float));
cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice);
tileSize = 32;
int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
//------------------------------
//vertex shader
//------------------------------
vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize);
cudaDeviceSynchronize();
//------------------------------
//primitive assembly
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, primitives);
cudaDeviceSynchronize();
//------------------------------
//rasterization
//------------------------------
rasterizationKernel<<<primitiveBlocks, tileSize>>>(primitives, ibosize/3, depthbuffer, resolution);
cudaDeviceSynchronize();
//------------------------------
//fragment shader
//------------------------------
fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution);
cudaDeviceSynchronize();
//------------------------------
//write fragments to framebuffer
//------------------------------
render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer);
cudaDeviceSynchronize();
kernelCleanup();
checkCUDAError("Kernel failed!");
}
void kernelCleanup(){
cudaFree( primitives );
cudaFree( device_vbo );
cudaFree( device_cbo );
cudaFree( device_ibo );
cudaFree( framebuffer );
cudaFree( depthbuffer );
}
|
c09a77e5e2fcbe396b7142c348d3cfa465a27f68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
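// AXPY kernel: computes Y = alpha*X + Y element-wise, i.e. b[id] = (*al)*a[id] + b[id];
// it is launched below with one single-thread block per element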
__global__ void add(int *a,int *b, int *al)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
b[id] = (*al)*a[id] + b[id];
}
int main()
{
int a[10],b[10],n,al;
printf("Enter n: ");
scanf("%d",&n);
printf("Enter alpha: ");
scanf("%d",&al);
printf("Enter X:\n");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter Y:\n");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size = sizeof(int)*n;
hipMalloc((void**)&d_a,size);
hipMalloc((void**)&d_b,size);
hipMalloc((void**)&d_c,sizeof(int));
hipMemcpy(d_a,&a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,&b,size,hipMemcpyHostToDevice);
hipMemcpy(d_c,&al,sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(n),dim3(1), 0, 0, d_a,d_b,d_c);
hipMemcpy(&b,d_b,size,hipMemcpyDeviceToHost);
for(int i=0;i<n;i++)
printf("%d ",b[i]);
printf("\n");
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| c09a77e5e2fcbe396b7142c348d3cfa465a27f68.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
__global__ void add(int *a,int *b, int *al)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
b[id] = (*al)*a[id] + b[id];
}
int main()
{
int a[10],b[10],n,al;
printf("Enter n: ");
scanf("%d",&n);
printf("Enter alpha: ");
scanf("%d",&al);
printf("Enter X:\n");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter Y:\n");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size = sizeof(int)*n;
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void**)&d_c,sizeof(int));
cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,&b,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_c,&al,sizeof(int),cudaMemcpyHostToDevice);
add<<<n,1>>>(d_a,d_b,d_c);
cudaMemcpy(&b,d_b,size,cudaMemcpyDeviceToHost);
for(int i=0;i<n;i++)
printf("%d ",b[i]);
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
47b9014834960fcf13e0be14aec0bf58e61c068c.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_utilities.cuh"
#include <iostream>
__host__
void check_cuda_error( const std::string& message, const hipError_t err ) {
if( err != hipSuccess ) {
std::cout << message << " [" << err << "] " << std::endl;
std::cout << hipGetErrorString( err ) << std::endl;
exit( -1 );
}
}
__device__
uint8_t atomicIncUint8( uint8_t* address ) {
// Obtain the address of the base integer in which our char is stored
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int old, assumed, new_;
// Get the current value
old = *base_address;
// Now loop until success
do {
//When I do the atomic CAS later, I expect to see 'assumed'
assumed = old;
// Now extract the uint8 that I'm interested in
// Endianness is little, so work out which byte of the 32-bit word holds our char
unsigned int masks[] = { 0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF};
unsigned int shifts[] = { 24, 16, 8, 0 };
int byte_index = 3 - ((size_t)address & 3);
unsigned int mask = masks[byte_index];
unsigned int shift = shifts[byte_index];
uint8_t old_uint8 = ( old & mask ) >> shift;
uint8_t new_uint8 = old_uint8 + 1;
uint new_int = (new_uint8 << shift) & mask;
new_ = old & ( ~mask );
new_ = new_ | new_int;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return old;
}
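// Usage sketch (illustrative only; "counters" and "voxel_of_thread" are hypothetical):
//
//   __global__ void count_hits( uint8_t* counters, const int* voxel_of_thread, int n ) {
//       int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if( i < n ) atomicIncUint8( &counters[ voxel_of_thread[i] ] );
//   }
//
// The CAS loop above makes a byte-sized increment safe even though the hardware
// only offers word-sized atomics; note the counter wraps silently past 255.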
void cudaSafeAlloc( void ** ptr, size_t sz, const std::string& purpose ) {
hipError_t err = hipMalloc( ptr, sz );
check_cuda_error( "Failed to allocate device memory for " + purpose , err);
}
void cudaSafeFree( void * ptr, const std::string& purpose ) {
hipError_t err = hipFree( ptr );
check_cuda_error( "Failed to free device memory for " + purpose , err);
}
| 47b9014834960fcf13e0be14aec0bf58e61c068c.cu | #include "cuda_utilities.cuh"
#include <iostream>
__host__
void check_cuda_error( const std::string& message, const cudaError_t err ) {
if( err != cudaSuccess ) {
std::cout << message << " [" << err << "] " << std::endl;
std::cout << cudaGetErrorString( err ) << std::endl;
exit( -1 );
}
}
__device__
uint8_t atomicIncUint8( uint8_t* address ) {
// Obtain the address of the base integer in which our char is stored
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int old, assumed, new_;
// Get the current value
old = *base_address;
// Now loop until success
do {
//When i do atomic CAS later, I expect to see 'assumed'
assumed = old;
// Now extract the uint8 that I'm interested in
// Endianess is little so
unsigned int masks[] = { 0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF};
unsigned int shifts[] = { 24, 16, 8, 0 };
int byte_index = 3 - ((size_t)address & 3);
unsigned int mask = masks[byte_index];
unsigned int shift = shifts[byte_index];
uint8_t old_uint8 = ( old & mask ) >> shift;
uint8_t new_uint8 = old_uint8 + 1;
uint new_int = (new_uint8 << shift) & mask;
new_ = old & ( ~mask );
new_ = new_ | new_int;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return old;
}
void cudaSafeAlloc( void ** ptr, size_t sz, const std::string& purpose ) {
cudaError_t err = cudaMalloc( ptr, sz );
check_cuda_error( "Failed to allocate device memory for " + purpose , err);
}
void cudaSafeFree( void * ptr, const std::string& purpose ) {
cudaError_t err = cudaFree( ptr );
check_cuda_error( "Failed to free device memory for " + purpose , err);
}
|
0a057e0b4952fc6d8f27c3349e6ab8cc2d061434.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#####################################################################
// Copyright (c) 2017, Haixiang Liu
// Distributed under the FreeBSD license (see license.txt)
//#####################################################################
#include <iostream>
#include <thrust/device_vector.h>
#include "Linear_Elasticity_CUDA_Optimized.h"
using namespace SPGrid;
#define THREADBLOCK 512
__constant__ float K_mu_device[576];
__constant__ float K_la_device[576];
//template <typename T,typename T_offset_ptr>
struct Parameters{
float* f[3];
const float* u[3];
const float* mu;
const float* lambda;
const unsigned int* b;
unsigned int number_of_blocks;
float dx;
};
// TODO: Remove the explicit template parameters on this one.
__constant__ char p_device[sizeof(Parameters)];
bool symbol_initialized = false;
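// Host-side setup sketch (an assumption about how the kernel below is driven; the
// actual driver sits behind Linear_Elasticity_CUDA_Optimized.h, and "para_host" is
// a hypothetical name):
//
//   Parameters para_host; // fill in f, u, mu, lambda, b, number_of_blocks, dx
//   hipMemcpyToSymbol(HIP_SYMBOL(p_device), &para_host, sizeof(Parameters));
//   hipMemcpyToSymbol(HIP_SYMBOL(K_mu_device), K_mu, 576 * sizeof(float));
//   hipMemcpyToSymbol(HIP_SYMBOL(K_la_device), K_lambda, 576 * sizeof(float));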
// Indexing convention: f_i^v = Sum_{j,w} K[i][j][v][w] * u_j^w  (i,j index cell nodes; v,w spatial components)
const float __attribute__ ((aligned(32))) K_mu[8][8][3][3] =
{
{
{
{ 32.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 32.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -10.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, -4.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -10.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -8.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -8.f/72.f}
}
},
{
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 32.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -10.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, -4.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -8.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -10.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -4.f/72.f}
}
},
{
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -10.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 32.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -10.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -8.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, -4.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -10.f/72.f}
}
},
{
{
{ -4.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -10.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 32.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -8.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -10.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, -4.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
}
},
{
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, -4.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -10.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -8.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 32.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -10.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -10.f/72.f}
}
},
{
{
{ -10.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, -4.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -8.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -10.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 32.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -10.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
}
},
{
{
{ -10.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -10.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -8.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, -4.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -10.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 32.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
}
},
{
{
{ -8.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -8.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -10.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, -4.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -10.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 32.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 32.f/72.f}
}
}
};
const float __attribute__ ((aligned(32))) K_lambda[8][8][3][3] =
{
{
{
{ 8.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 8.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -4.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 2.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -4.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -2.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -2.f/72.f}
}
},
{
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 8.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -4.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 2.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -2.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -4.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, 2.f/72.f}
}
},
{
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -4.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 8.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -4.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -2.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 2.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -4.f/72.f}
}
},
{
{
{ 2.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -4.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 8.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -2.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -4.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 2.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
}
},
{
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 2.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -4.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -2.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 8.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -4.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -4.f/72.f}
}
},
{
{
{ -4.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 2.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -2.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -4.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 8.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -4.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
}
},
{
{
{ -4.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -4.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -2.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 2.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -4.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 8.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
}
},
{
{
{ -2.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -2.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -4.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 2.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -4.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 8.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 8.f/72.f}
}
}
};
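// The b[27] offset array passed to the lookup helpers below describes the 3x3x3
// neighbourhood of blocks around the current one, laid out z-fastest: index 13 is
// the centre block and stepping by +/-1, +/-3 or +/-9 moves one block in z, y or x.
// Coordinates that fall outside [0, block_size) are first wrapped into the matching
// neighbour block, then the flat entry within that block is computed.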
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ T Lookup_Value_3D(const T* a,const T_offset_ptr b[27],int x,int y,int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
return reinterpret_cast<T*>((unsigned long)a + (unsigned long)b[block_id])[entry];
}
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ void Lookup_Two_Values_3D(T& a_out,T& b_out, const T* a_array, const T* b_array, const T_offset_ptr b[27], int x, int y, int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
a_out = reinterpret_cast<T*>((unsigned long)a_array + (unsigned long)b[block_id])[entry];
b_out = reinterpret_cast<T*>((unsigned long)b_array + (unsigned long)b[block_id])[entry];
}
template<typename T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ void Lookup_Vector_3D(T out[3],const T* a[3],const T_offset_ptr b[27],int x,int y,int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
out[0] = reinterpret_cast<T*>((unsigned long)a[0] + (unsigned long)b[block_id])[entry];
out[1] = reinterpret_cast<T*>((unsigned long)a[1] + (unsigned long)b[block_id])[entry];
out[2] = reinterpret_cast<T*>((unsigned long)a[2] + (unsigned long)b[block_id])[entry];
}
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__global__ void Linear_Elasticity_Kernel_3D()
{
enum{d = 3};
enum{nodes_per_cell = 1 << d};
//this kernel assumes we have at least as many threads per block as entries per block
enum{DATABLOCK = block_xsize * block_ysize * block_zsize,
span = THREADBLOCK / DATABLOCK};
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
enum{padded_node_xsize = block_xsize + 2,
padded_node_ysize = block_ysize + 2,
padded_node_zsize = block_zsize + 2,
padded_node_total_size = padded_node_xsize * padded_node_ysize * padded_node_zsize,
padded_cell_xsize = block_xsize + 1,
padded_cell_ysize = block_ysize + 1,
padded_cell_zsize = block_zsize + 1,
padded_cell_total_size = padded_cell_xsize * padded_cell_ysize * padded_cell_zsize};
Parameters& para = *reinterpret_cast<Parameters*>(p_device);
using T_BLOCK = const T (&)[block_xsize][block_ysize][block_zsize];
static_assert(span == 1,"Only span of 1 is supported");
static_assert(THREADBLOCK * 2 >= padded_node_total_size, "THREADBLOCK is too small!!");
static_assert(THREADBLOCK * 2 >= padded_cell_total_size, "THREADBLOCK is too small!!");
const unsigned int entry = threadIdx.x;
const int z = entry % block_zsize;
const int y = entry / block_zsize % block_ysize;
const int x = entry / block_zsize / block_ysize;
const int cell_z1 = (threadIdx.x % padded_cell_zsize);
const int cell_y1 = (threadIdx.x / padded_cell_zsize % padded_cell_ysize);
const int cell_x1 = (threadIdx.x / padded_cell_zsize / padded_cell_ysize);
const int cell_z2 = ((threadIdx.x + THREADBLOCK) % padded_cell_zsize);
const int cell_y2 = ((threadIdx.x + THREADBLOCK) / padded_cell_zsize % padded_cell_ysize);
const int cell_x2 = ((threadIdx.x + THREADBLOCK) / padded_cell_zsize / padded_cell_ysize);
const int node_z1 = (threadIdx.x % padded_node_zsize);
const int node_y1 = (threadIdx.x / padded_node_zsize % padded_node_ysize);
const int node_x1 = (threadIdx.x / padded_node_zsize / padded_node_ysize);
const int node_z2 = (threadIdx.x + THREADBLOCK) % padded_node_zsize;
const int node_y2 = (threadIdx.x + THREADBLOCK) / padded_node_zsize % padded_node_ysize;
const int node_x2 = (threadIdx.x + THREADBLOCK) / padded_node_zsize / padded_node_ysize;
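    // The first 27 threads stage this block's 3x3x3 neighborhood of block offsets
    // (center at index 13) into shared memory; the Lookup_* helpers use these offsets
    // to resolve reads that fall outside the current block.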
__shared__ T_offset_ptr block_index[27];
if(threadIdx.x < 27)
block_index[threadIdx.x] = para.b[blockIdx.x * 27 + threadIdx.x];
using T_K = const T (&)[2][2][2][2][2][2][d][d];
T_K K_mu_converted = reinterpret_cast<T_K>(K_mu_device[0]);
T_K K_la_converted = reinterpret_cast<T_K>(K_la_device[0]);
if(blockIdx.x < para.number_of_blocks){
__syncthreads();
__shared__ T u_local[padded_node_xsize][padded_node_ysize][padded_node_zsize][d];
__shared__ T mu_local[padded_cell_xsize][padded_cell_ysize][padded_cell_zsize];
__shared__ T la_local[padded_cell_xsize][padded_cell_ysize][padded_cell_zsize];
Lookup_Two_Values_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(mu_local[cell_x1][cell_y1][cell_z1],
la_local[cell_x1][cell_y1][cell_z1],
para.mu, para.lambda, block_index,
cell_x1 - 1, cell_y1 - 1, cell_z1 - 1);
if((threadIdx.x + THREADBLOCK) < padded_cell_total_size)
Lookup_Two_Values_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(mu_local[cell_x2][cell_y2][cell_z2],
la_local[cell_x2][cell_y2][cell_z2],
para.mu, para.lambda, block_index,
cell_x2 - 1, cell_y2 - 1, cell_z2 - 1);
Lookup_Vector_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(u_local[node_x1][node_y1][node_z1],
para.u, block_index,
node_x1 - 1, node_y1 - 1, node_z1 - 1);
if((threadIdx.x + THREADBLOCK) < padded_node_total_size)
Lookup_Vector_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(u_local[node_x2][node_y2][node_z2],
para.u, block_index,
node_x2 - 1, node_y2 - 1, node_z2 - 1);
__syncthreads();
T f_node[d] = {0,0,0};
T K_tmp[9];
using T_MATRIX = const T (&)[d][d];
T_MATRIX m = reinterpret_cast<T_MATRIX>(K_tmp[0]);
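        // For each of the 27 neighbor offsets, K_tmp is rebuilt as the 3x3 blend
        // mu * K_mu + lambda * K_la over the contributing cells and applied to that
        // neighbor's displacement through the alias m.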
// -1, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z] * (&K_mu_converted[1][1][1][0][0][0][0][0])[i] +
la_local[x][y][z] * (&K_la_converted[1][1][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z][v];
// -1, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z ] * (&K_mu_converted[1][1][1][0][0][1][0][0])[i] +
la_local[x][y][z ] * (&K_la_converted[1][1][1][0][0][1][0][0])[i] +
mu_local[x][y][z + 1] * (&K_mu_converted[1][1][0][0][0][0][0][0])[i] +
la_local[x][y][z + 1] * (&K_la_converted[1][1][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z + 1][v];
// -1, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z + 1] * (&K_mu_converted[1][1][0][0][0][1][0][0])[i] +
la_local[x][y][z + 1] * (&K_la_converted[1][1][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z + 2][v];
// -1, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z] * (&K_mu_converted[1][1][1][0][1][0][0][0])[i] +
la_local[x][y ][z] * (&K_la_converted[1][1][1][0][1][0][0][0])[i] +
mu_local[x][y + 1][z] * (&K_mu_converted[1][0][1][0][0][0][0][0])[i] +
la_local[x][y + 1][z] * (&K_la_converted[1][0][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z][v];
// -1, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z ] * (&K_mu_converted[1][1][1][0][1][1][0][0])[i] +
la_local[x][y ][z ] * (&K_la_converted[1][1][1][0][1][1][0][0])[i] +
mu_local[x][y ][z + 1] * (&K_mu_converted[1][1][0][0][1][0][0][0])[i] +
la_local[x][y ][z + 1] * (&K_la_converted[1][1][0][0][1][0][0][0])[i] +
mu_local[x][y + 1][z ] * (&K_mu_converted[1][0][1][0][0][1][0][0])[i] +
la_local[x][y + 1][z ] * (&K_la_converted[1][0][1][0][0][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][0][0][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z + 1][v];
// -1, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z + 1] * (&K_mu_converted[1][1][0][0][1][1][0][0])[i] +
la_local[x][y ][z + 1] * (&K_la_converted[1][1][0][0][1][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][0][1][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z + 2][v];
// -1, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z] * (&K_mu_converted[1][0][1][0][1][0][0][0])[i] +
la_local[x][y + 1][z] * (&K_la_converted[1][0][1][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z][v];
// -1, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z ] * (&K_mu_converted[1][0][1][0][1][1][0][0])[i] +
la_local[x][y + 1][z ] * (&K_la_converted[1][0][1][0][1][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][1][0][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z + 1][v];
// -1, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][1][1][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z + 2][v];
// 0, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z] * (&K_mu_converted[1][1][1][1][0][0][0][0])[i] +
la_local[x ][y][z] * (&K_la_converted[1][1][1][1][0][0][0][0])[i] +
mu_local[x + 1][y][z] * (&K_mu_converted[0][1][1][0][0][0][0][0])[i] +
la_local[x + 1][y][z] * (&K_la_converted[0][1][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z][v];
// 0, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z ] * (&K_mu_converted[1][1][1][1][0][1][0][0])[i] +
la_local[x ][y][z ] * (&K_la_converted[1][1][1][1][0][1][0][0])[i] +
mu_local[x ][y][z + 1] * (&K_mu_converted[1][1][0][1][0][0][0][0])[i] +
la_local[x ][y][z + 1] * (&K_la_converted[1][1][0][1][0][0][0][0])[i] +
mu_local[x + 1][y][z ] * (&K_mu_converted[0][1][1][0][0][1][0][0])[i] +
la_local[x + 1][y][z ] * (&K_la_converted[0][1][1][0][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][0][0][0][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z + 1][v];
// 0, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z + 1] * (&K_mu_converted[1][1][0][1][0][1][0][0])[i] +
la_local[x ][y][z + 1] * (&K_la_converted[1][1][0][1][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][0][0][1][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z + 2][v];
// 0, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z] * (&K_mu_converted[1][1][1][1][1][0][0][0])[i] +
la_local[x ][y ][z] * (&K_la_converted[1][1][1][1][1][0][0][0])[i] +
mu_local[x ][y + 1][z] * (&K_mu_converted[1][0][1][1][0][0][0][0])[i] +
la_local[x ][y + 1][z] * (&K_la_converted[1][0][1][1][0][0][0][0])[i] +
mu_local[x + 1][y ][z] * (&K_mu_converted[0][1][1][0][1][0][0][0])[i] +
la_local[x + 1][y ][z] * (&K_la_converted[0][1][1][0][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][0][0][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z][v];
// 0, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z ] * (&K_mu_converted[1][1][1][1][1][1][0][0])[i] +
la_local[x ][y ][z ] * (&K_la_converted[1][1][1][1][1][1][0][0])[i] +
mu_local[x ][y ][z + 1] * (&K_mu_converted[1][1][0][1][1][0][0][0])[i] +
la_local[x ][y ][z + 1] * (&K_la_converted[1][1][0][1][1][0][0][0])[i] +
mu_local[x ][y + 1][z ] * (&K_mu_converted[1][0][1][1][0][1][0][0])[i] +
la_local[x ][y + 1][z ] * (&K_la_converted[1][0][1][1][0][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][0][0][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][0][0][0][0])[i] +
mu_local[x + 1][y ][z ] * (&K_mu_converted[0][1][1][0][1][1][0][0])[i] +
la_local[x + 1][y ][z ] * (&K_la_converted[0][1][1][0][1][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][0][1][0][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][0][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][0][0][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][0][0][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][0][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z + 1][v];
// 0, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z + 1] * (&K_mu_converted[1][1][0][1][1][1][0][0])[i] +
la_local[x ][y ][z + 1] * (&K_la_converted[1][1][0][1][1][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][0][1][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][0][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][0][1][1][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][0][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][0][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z + 2][v];
// 0, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z] * (&K_mu_converted[1][0][1][1][1][0][0][0])[i] +
la_local[x ][y + 1][z] * (&K_la_converted[1][0][1][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][0][1][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z][v];
// 0, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z ] * (&K_mu_converted[1][0][1][1][1][1][0][0])[i] +
la_local[x ][y + 1][z ] * (&K_la_converted[1][0][1][1][1][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][1][0][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][0][1][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][0][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][1][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z + 1][v];
// 0, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][1][1][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][1][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z + 2][v];
// +1, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z] * (&K_mu_converted[0][1][1][1][0][0][0][0])[i] +
la_local[x + 1][y][z] * (&K_la_converted[0][1][1][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z][v];
// +1, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z ] * (&K_mu_converted[0][1][1][1][0][1][0][0])[i] +
la_local[x + 1][y][z ] * (&K_la_converted[0][1][1][1][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][1][0][0][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z + 1][v];
// +1, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][1][0][1][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][1][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z + 2][v];
// +1, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z] * (&K_mu_converted[0][1][1][1][1][0][0][0])[i] +
la_local[x + 1][y ][z] * (&K_la_converted[0][1][1][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][1][0][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z][v];
// +1, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z ] * (&K_mu_converted[0][1][1][1][1][1][0][0])[i] +
la_local[x + 1][y ][z ] * (&K_la_converted[0][1][1][1][1][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][1][1][0][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][1][0][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][1][0][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][0][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z + 1][v];
// +1, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][1][1][1][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][0][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z + 2][v];
// +1, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][1][1][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][1][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z][v];
// +1, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][1][1][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][1][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z + 1][v];
// +1, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][1][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z + 2][v];
for(int v = 0;v < d;++v)
reinterpret_cast<T*>((unsigned long)para.f[v] + (unsigned long)block_index[13])[entry] = f_node[v] * para.dx;
}
}
//#####################################################################
// Constructor 3D
//#####################################################################
template <class T, int log2_struct,class T_offset_ptr>
Linear_Elasticity_CUDA_Optimized<T,log2_struct,3,T_offset_ptr>::Linear_Elasticity_CUDA_Optimized(T* const f_input[d],const T* const u_input[d],
const T* const mu_input,const T* const lambda_input,
const T_offset_ptr* const b_input,const int size_input,
const T dx_input)
:mu(mu_input),lambda(lambda_input),b(b_input),size(size_input),dx(dx_input)
{
for(int v=0;v<d;++v){f[v]=f_input[v];u[v]=u_input[v];}
}
//#####################################################################
// Function Run
//#####################################################################
template <class T,int log2_struct,class T_offset_ptr>
void Linear_Elasticity_CUDA_Optimized<T,log2_struct,3,T_offset_ptr>::Run()
{
if(!symbol_initialized){
hipMemcpyToSymbol(K_mu_device,&K_mu[0][0][0][0],576*sizeof(float),0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(K_la_device,&K_lambda[0][0][0][0],576*sizeof(float),0,hipMemcpyHostToDevice);
symbol_initialized = true;}
hipDeviceSynchronize();
    //std::cout << "Block size: " << block_xsize << " * " << block_ysize << " * " << block_zsize << std::endl;
int number_of_cuda_blocks = size;
if(number_of_cuda_blocks == 0) return;
Parameters p;
for(int v=0;v<d;++v){
p.f[v] = f[v];
p.u[v] = u[v];}
p.mu = mu;
p.lambda = lambda;
p.b = b;
p.number_of_blocks = size;
p.dx = dx;
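    // Kernel inputs are passed through the constant-memory Parameters copy (p_device)
    // rather than as launch arguments; one thread block is launched per SPGrid block.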
hipMemcpyToSymbol(p_device,(void*)&p,sizeof(Parameters),0,hipMemcpyHostToDevice);
auto cudaerror = hipGetLastError();
if(hipSuccess!=cudaerror){
std::cerr<<"CUDA ERROR: "<<hipGetErrorString(cudaerror)<<std::endl;abort();}
hipLaunchKernelGGL(( Linear_Elasticity_Kernel_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>)
, dim3(number_of_cuda_blocks),dim3(THREADBLOCK),0, 0, );
#if 0
hipDeviceSynchronize();
cudaerror = hipGetLastError();
if(hipSuccess!=cudaerror){
std::cerr<<"CUDA ERROR: "<<hipGetErrorString(cudaerror)<<std::endl;abort();}
#endif
}
//#####################################################################################################
template class Linear_Elasticity_CUDA_Optimized<float,3,3,unsigned int>;
| 0a057e0b4952fc6d8f27c3349e6ab8cc2d061434.cu | //#####################################################################
// Copyright (c) 2017, Haixiang Liu
// Distributed under the FreeBSD license (see license.txt)
//#####################################################################
#include <iostream>
#include <thrust/device_vector.h>
#include "Linear_Elasticity_CUDA_Optimized.h"
using namespace SPGrid;
#define THREADBLOCK 512
__constant__ float K_mu_device[576];
__constant__ float K_la_device[576];
//template <typename T,typename T_offset_ptr>
struct Parameters{
float* f[3];
const float* u[3];
const float* mu;
const float* lambda;
const unsigned int* b;
unsigned int number_of_blocks;
float dx;
};
// TODO: Remove the explicit template parameters on this one.
__constant__ char p_device[sizeof(Parameters)];
bool symbol_initialized = false;
// Index here: f_i^v = Sum_{jw}(K[i][j][v][w] * u_j^w)
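// K_mu / K_lambda hold the 8x8 grid of 3x3 per-node-pair stiffness sub-matrices of a
// single cell (presumably a trilinear hexahedral element), split into the mu- and
// lambda-weighted parts; Run() copies them into K_mu_device / K_la_device.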
const float __attribute__ ((aligned(32))) K_mu[8][8][3][3] =
{
{
{
{ 32.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 32.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -10.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, -4.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -10.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -8.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -8.f/72.f}
}
},
{
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 32.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -10.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, -4.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -8.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -10.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -4.f/72.f}
}
},
{
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -10.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 32.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -10.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -8.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, -4.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -10.f/72.f}
}
},
{
{
{ -4.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -10.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 32.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -8.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -10.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, -4.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
}
},
{
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, -4.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -10.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -8.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 32.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -10.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -10.f/72.f}
}
},
{
{
{ -10.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, -4.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -8.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -8.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -10.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 32.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -10.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
}
},
{
{
{ -10.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -10.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -8.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -8.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, -4.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -10.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -10.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 32.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 32.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
}
},
{
{
{ -8.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -8.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -8.f/72.f}
},
{
{ -10.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -10.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -10.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, -4.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -10.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -10.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -10.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 32.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 32.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 32.f/72.f}
}
}
};
const float __attribute__ ((aligned(32))) K_lambda[8][8][3][3] =
{
{
{
{ 8.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 8.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -4.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 2.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -4.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -2.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -2.f/72.f}
}
},
{
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 8.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -4.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 2.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -2.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -4.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, 2.f/72.f}
}
},
{
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -4.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 8.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -4.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -2.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 2.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -4.f/72.f}
}
},
{
{
{ 2.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -4.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 8.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -2.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -4.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 2.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
}
},
{
{
{ -8.f/72.f, -6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 4.f/72.f, 3.f/72.f},
{ 6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, -3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 2.f/72.f, -3.f/72.f},
{ 6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -4.f/72.f, 3.f/72.f},
{ 3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -2.f/72.f, -3.f/72.f},
{ 3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 8.f/72.f, 6.f/72.f},
{ -6.f/72.f, 6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 4.f/72.f, -6.f/72.f},
{ -6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -8.f/72.f, 6.f/72.f},
{ -3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -4.f/72.f, -6.f/72.f},
{ -3.f/72.f, -6.f/72.f, -4.f/72.f}
}
},
{
{
{ -4.f/72.f, -3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 2.f/72.f, 3.f/72.f},
{ -6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, -6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 4.f/72.f, -3.f/72.f},
{ -6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -2.f/72.f, 3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -2.f/72.f, 3.f/72.f},
{ -3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, 6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -4.f/72.f, -3.f/72.f},
{ -3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ 4.f/72.f, -3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 4.f/72.f, 6.f/72.f},
{ 6.f/72.f, -6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, -6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 8.f/72.f, -6.f/72.f},
{ 6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 2.f/72.f, 3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -4.f/72.f, 6.f/72.f},
{ 3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, 6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -8.f/72.f, -6.f/72.f},
{ 3.f/72.f, 6.f/72.f, 4.f/72.f}
}
},
{
{
{ -4.f/72.f, -6.f/72.f, -3.f/72.f},
{ -6.f/72.f, -4.f/72.f, -3.f/72.f},
{ 3.f/72.f, 3.f/72.f, 2.f/72.f}
},
{
{ -2.f/72.f, -3.f/72.f, 3.f/72.f},
{ -3.f/72.f, -2.f/72.f, 3.f/72.f},
{ 3.f/72.f, 3.f/72.f, -2.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, -6.f/72.f},
{ -6.f/72.f, 4.f/72.f, -3.f/72.f},
{ 6.f/72.f, -3.f/72.f, 4.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, 6.f/72.f},
{ -3.f/72.f, 2.f/72.f, 3.f/72.f},
{ 6.f/72.f, -3.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, -3.f/72.f},
{ 6.f/72.f, -8.f/72.f, -6.f/72.f},
{ -3.f/72.f, 6.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, 3.f/72.f},
{ 3.f/72.f, -4.f/72.f, 6.f/72.f},
{ -3.f/72.f, 6.f/72.f, -4.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, -6.f/72.f},
{ 6.f/72.f, 8.f/72.f, -6.f/72.f},
{ -6.f/72.f, -6.f/72.f, 8.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, 6.f/72.f},
{ 3.f/72.f, 4.f/72.f, 6.f/72.f},
{ -6.f/72.f, -6.f/72.f, -8.f/72.f}
}
},
{
{
{ -2.f/72.f, -3.f/72.f, -3.f/72.f},
{ -3.f/72.f, -2.f/72.f, -3.f/72.f},
{ -3.f/72.f, -3.f/72.f, -2.f/72.f}
},
{
{ -4.f/72.f, -6.f/72.f, 3.f/72.f},
{ -6.f/72.f, -4.f/72.f, 3.f/72.f},
{ -3.f/72.f, -3.f/72.f, 2.f/72.f}
},
{
{ -4.f/72.f, 3.f/72.f, -6.f/72.f},
{ -3.f/72.f, 2.f/72.f, -3.f/72.f},
{ -6.f/72.f, 3.f/72.f, -4.f/72.f}
},
{
{ -8.f/72.f, 6.f/72.f, 6.f/72.f},
{ -6.f/72.f, 4.f/72.f, 3.f/72.f},
{ -6.f/72.f, 3.f/72.f, 4.f/72.f}
},
{
{ 2.f/72.f, -3.f/72.f, -3.f/72.f},
{ 3.f/72.f, -4.f/72.f, -6.f/72.f},
{ 3.f/72.f, -6.f/72.f, -4.f/72.f}
},
{
{ 4.f/72.f, -6.f/72.f, 3.f/72.f},
{ 6.f/72.f, -8.f/72.f, 6.f/72.f},
{ 3.f/72.f, -6.f/72.f, 4.f/72.f}
},
{
{ 4.f/72.f, 3.f/72.f, -6.f/72.f},
{ 3.f/72.f, 4.f/72.f, -6.f/72.f},
{ 6.f/72.f, 6.f/72.f, -8.f/72.f}
},
{
{ 8.f/72.f, 6.f/72.f, 6.f/72.f},
{ 6.f/72.f, 8.f/72.f, 6.f/72.f},
{ 6.f/72.f, 6.f/72.f, 8.f/72.f}
}
}
};
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ T Lookup_Value_3D(const T* a,const T_offset_ptr b[27],int x,int y,int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
return reinterpret_cast<T*>((unsigned long)a + (unsigned long)b[block_id])[entry];
}
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ void Lookup_Two_Values_3D(T& a_out,T& b_out, const T* a_array, const T* b_array, const T_offset_ptr b[27], int x, int y, int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
a_out = reinterpret_cast<T*>((unsigned long)a_array + (unsigned long)b[block_id])[entry];
b_out = reinterpret_cast<T*>((unsigned long)b_array + (unsigned long)b[block_id])[entry];
}
template<typename T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__device__ void Lookup_Vector_3D(T out[3],const T* a[3],const T_offset_ptr b[27],int x,int y,int z)
{
int block_id = 13;
if(z < 0) {block_id -= 1;z += block_zsize;}
if(z >= block_zsize) {block_id += 1;z -= block_zsize;}
if(y < 0) {block_id -= 3;y += block_ysize;}
if(y >= block_ysize) {block_id += 3;y -= block_ysize;}
if(x < 0) {block_id -= 9;x += block_xsize;}
if(x >= block_xsize) {block_id += 9;x -= block_xsize;}
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
const int entry = z + y * ystride + x * xstride;
out[0] = reinterpret_cast<T*>((unsigned long)a[0] + (unsigned long)b[block_id])[entry];
out[1] = reinterpret_cast<T*>((unsigned long)a[1] + (unsigned long)b[block_id])[entry];
out[2] = reinterpret_cast<T*>((unsigned long)a[2] + (unsigned long)b[block_id])[entry];
}
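// The kernel below applies the linear-elasticity operator f = K(mu,lambda) u on one
// SPGrid block per thread block: the padded node/cell neighborhoods of u, mu and lambda
// are staged cooperatively into shared memory, each thread then accumulates the 27
// neighbor-stencil contributions for its node and writes f_node * dx back to para.f.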
template<class T,class T_offset_ptr,int block_xsize,int block_ysize,int block_zsize>
__global__ void Linear_Elasticity_Kernel_3D()
{
enum{d = 3};
enum{nodes_per_cell = 1 << d};
    //this kernel assumes we have more threads per block than entries per block
enum{DATABLOCK = block_xsize * block_ysize * block_zsize,
span = THREADBLOCK / DATABLOCK};
enum{xstride = block_ysize * block_zsize,
ystride = block_zsize,
zstride = 1};
enum{padded_node_xsize = block_xsize + 2,
padded_node_ysize = block_ysize + 2,
padded_node_zsize = block_zsize + 2,
padded_node_total_size = padded_node_xsize * padded_node_ysize * padded_node_zsize,
padded_cell_xsize = block_xsize + 1,
padded_cell_ysize = block_ysize + 1,
padded_cell_zsize = block_zsize + 1,
padded_cell_total_size = padded_cell_xsize * padded_cell_ysize * padded_cell_zsize};
Parameters& para = *reinterpret_cast<Parameters*>(p_device);
using T_BLOCK = const T (&)[block_xsize][block_ysize][block_zsize];
static_assert(span == 1,"Only span of 1 is supported");
static_assert(THREADBLOCK * 2 >= padded_node_total_size, "THREADBLOCK is too small!!");
static_assert(THREADBLOCK * 2 >= padded_cell_total_size, "THREADBLOCK is too small!!");
const unsigned int entry = threadIdx.x;
const int z = entry % block_zsize;
const int y = entry / block_zsize % block_ysize;
const int x = entry / block_zsize / block_ysize;
const int cell_z1 = (threadIdx.x % padded_cell_zsize);
const int cell_y1 = (threadIdx.x / padded_cell_zsize % padded_cell_ysize);
const int cell_x1 = (threadIdx.x / padded_cell_zsize / padded_cell_ysize);
const int cell_z2 = ((threadIdx.x + THREADBLOCK) % padded_cell_zsize);
const int cell_y2 = ((threadIdx.x + THREADBLOCK) / padded_cell_zsize % padded_cell_ysize);
const int cell_x2 = ((threadIdx.x + THREADBLOCK) / padded_cell_zsize / padded_cell_ysize);
const int node_z1 = (threadIdx.x % padded_node_zsize);
const int node_y1 = (threadIdx.x / padded_node_zsize % padded_node_ysize);
const int node_x1 = (threadIdx.x / padded_node_zsize / padded_node_ysize);
const int node_z2 = (threadIdx.x + THREADBLOCK) % padded_node_zsize;
const int node_y2 = (threadIdx.x + THREADBLOCK) / padded_node_zsize % padded_node_ysize;
const int node_x2 = (threadIdx.x + THREADBLOCK) / padded_node_zsize / padded_node_ysize;
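    // The first 27 threads stage this block's 3x3x3 neighborhood of block offsets
    // (center at index 13) into shared memory; the Lookup_* helpers use these offsets
    // to resolve reads that fall outside the current block.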
__shared__ T_offset_ptr block_index[27];
if(threadIdx.x < 27)
block_index[threadIdx.x] = para.b[blockIdx.x * 27 + threadIdx.x];
using T_K = const T (&)[2][2][2][2][2][2][d][d];
T_K K_mu_converted = reinterpret_cast<T_K>(K_mu_device[0]);
T_K K_la_converted = reinterpret_cast<T_K>(K_la_device[0]);
if(blockIdx.x < para.number_of_blocks){
__syncthreads();
__shared__ T u_local[padded_node_xsize][padded_node_ysize][padded_node_zsize][d];
__shared__ T mu_local[padded_cell_xsize][padded_cell_ysize][padded_cell_zsize];
__shared__ T la_local[padded_cell_xsize][padded_cell_ysize][padded_cell_zsize];
Lookup_Two_Values_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(mu_local[cell_x1][cell_y1][cell_z1],
la_local[cell_x1][cell_y1][cell_z1],
para.mu, para.lambda, block_index,
cell_x1 - 1, cell_y1 - 1, cell_z1 - 1);
if((threadIdx.x + THREADBLOCK) < padded_cell_total_size)
Lookup_Two_Values_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(mu_local[cell_x2][cell_y2][cell_z2],
la_local[cell_x2][cell_y2][cell_z2],
para.mu, para.lambda, block_index,
cell_x2 - 1, cell_y2 - 1, cell_z2 - 1);
Lookup_Vector_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(u_local[node_x1][node_y1][node_z1],
para.u, block_index,
node_x1 - 1, node_y1 - 1, node_z1 - 1);
if((threadIdx.x + THREADBLOCK) < padded_node_total_size)
Lookup_Vector_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>(u_local[node_x2][node_y2][node_z2],
para.u, block_index,
node_x2 - 1, node_y2 - 1, node_z2 - 1);
__syncthreads();
T f_node[d] = {0,0,0};
T K_tmp[9];
using T_MATRIX = const T (&)[d][d];
T_MATRIX m = reinterpret_cast<T_MATRIX>(K_tmp[0]);
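        // For each of the 27 neighbor offsets, K_tmp is rebuilt as the 3x3 blend
        // mu * K_mu + lambda * K_la over the contributing cells and applied to that
        // neighbor's displacement through the alias m.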
// -1, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z] * (&K_mu_converted[1][1][1][0][0][0][0][0])[i] +
la_local[x][y][z] * (&K_la_converted[1][1][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z][v];
// -1, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z ] * (&K_mu_converted[1][1][1][0][0][1][0][0])[i] +
la_local[x][y][z ] * (&K_la_converted[1][1][1][0][0][1][0][0])[i] +
mu_local[x][y][z + 1] * (&K_mu_converted[1][1][0][0][0][0][0][0])[i] +
la_local[x][y][z + 1] * (&K_la_converted[1][1][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z + 1][v];
// -1, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y][z + 1] * (&K_mu_converted[1][1][0][0][0][1][0][0])[i] +
la_local[x][y][z + 1] * (&K_la_converted[1][1][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y][z + 2][v];
// -1, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z] * (&K_mu_converted[1][1][1][0][1][0][0][0])[i] +
la_local[x][y ][z] * (&K_la_converted[1][1][1][0][1][0][0][0])[i] +
mu_local[x][y + 1][z] * (&K_mu_converted[1][0][1][0][0][0][0][0])[i] +
la_local[x][y + 1][z] * (&K_la_converted[1][0][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z][v];
// -1, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z ] * (&K_mu_converted[1][1][1][0][1][1][0][0])[i] +
la_local[x][y ][z ] * (&K_la_converted[1][1][1][0][1][1][0][0])[i] +
mu_local[x][y ][z + 1] * (&K_mu_converted[1][1][0][0][1][0][0][0])[i] +
la_local[x][y ][z + 1] * (&K_la_converted[1][1][0][0][1][0][0][0])[i] +
mu_local[x][y + 1][z ] * (&K_mu_converted[1][0][1][0][0][1][0][0])[i] +
la_local[x][y + 1][z ] * (&K_la_converted[1][0][1][0][0][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][0][0][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z + 1][v];
// -1, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y ][z + 1] * (&K_mu_converted[1][1][0][0][1][1][0][0])[i] +
la_local[x][y ][z + 1] * (&K_la_converted[1][1][0][0][1][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][0][1][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 1][z + 2][v];
// -1, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z] * (&K_mu_converted[1][0][1][0][1][0][0][0])[i] +
la_local[x][y + 1][z] * (&K_la_converted[1][0][1][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z][v];
// -1, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z ] * (&K_mu_converted[1][0][1][0][1][1][0][0])[i] +
la_local[x][y + 1][z ] * (&K_la_converted[1][0][1][0][1][1][0][0])[i] +
mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][1][0][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z + 1][v];
// -1, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x][y + 1][z + 1] * (&K_mu_converted[1][0][0][0][1][1][0][0])[i] +
la_local[x][y + 1][z + 1] * (&K_la_converted[1][0][0][0][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x][y + 2][z + 2][v];
// 0, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z] * (&K_mu_converted[1][1][1][1][0][0][0][0])[i] +
la_local[x ][y][z] * (&K_la_converted[1][1][1][1][0][0][0][0])[i] +
mu_local[x + 1][y][z] * (&K_mu_converted[0][1][1][0][0][0][0][0])[i] +
la_local[x + 1][y][z] * (&K_la_converted[0][1][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z][v];
// 0, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z ] * (&K_mu_converted[1][1][1][1][0][1][0][0])[i] +
la_local[x ][y][z ] * (&K_la_converted[1][1][1][1][0][1][0][0])[i] +
mu_local[x ][y][z + 1] * (&K_mu_converted[1][1][0][1][0][0][0][0])[i] +
la_local[x ][y][z + 1] * (&K_la_converted[1][1][0][1][0][0][0][0])[i] +
mu_local[x + 1][y][z ] * (&K_mu_converted[0][1][1][0][0][1][0][0])[i] +
la_local[x + 1][y][z ] * (&K_la_converted[0][1][1][0][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][0][0][0][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z + 1][v];
// 0, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y][z + 1] * (&K_mu_converted[1][1][0][1][0][1][0][0])[i] +
la_local[x ][y][z + 1] * (&K_la_converted[1][1][0][1][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][0][0][1][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y][z + 2][v];
// 0, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z] * (&K_mu_converted[1][1][1][1][1][0][0][0])[i] +
la_local[x ][y ][z] * (&K_la_converted[1][1][1][1][1][0][0][0])[i] +
mu_local[x ][y + 1][z] * (&K_mu_converted[1][0][1][1][0][0][0][0])[i] +
la_local[x ][y + 1][z] * (&K_la_converted[1][0][1][1][0][0][0][0])[i] +
mu_local[x + 1][y ][z] * (&K_mu_converted[0][1][1][0][1][0][0][0])[i] +
la_local[x + 1][y ][z] * (&K_la_converted[0][1][1][0][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][0][0][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z][v];
// 0, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z ] * (&K_mu_converted[1][1][1][1][1][1][0][0])[i] +
la_local[x ][y ][z ] * (&K_la_converted[1][1][1][1][1][1][0][0])[i] +
mu_local[x ][y ][z + 1] * (&K_mu_converted[1][1][0][1][1][0][0][0])[i] +
la_local[x ][y ][z + 1] * (&K_la_converted[1][1][0][1][1][0][0][0])[i] +
mu_local[x ][y + 1][z ] * (&K_mu_converted[1][0][1][1][0][1][0][0])[i] +
la_local[x ][y + 1][z ] * (&K_la_converted[1][0][1][1][0][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][0][0][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][0][0][0][0])[i] +
mu_local[x + 1][y ][z ] * (&K_mu_converted[0][1][1][0][1][1][0][0])[i] +
la_local[x + 1][y ][z ] * (&K_la_converted[0][1][1][0][1][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][0][1][0][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][0][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][0][0][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][0][0][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][0][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z + 1][v];
// 0, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y ][z + 1] * (&K_mu_converted[1][1][0][1][1][1][0][0])[i] +
la_local[x ][y ][z + 1] * (&K_la_converted[1][1][0][1][1][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][0][1][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][0][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][0][1][1][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][0][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][0][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 1][z + 2][v];
// 0, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z] * (&K_mu_converted[1][0][1][1][1][0][0][0])[i] +
la_local[x ][y + 1][z] * (&K_la_converted[1][0][1][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][0][1][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z][v];
// 0, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z ] * (&K_mu_converted[1][0][1][1][1][1][0][0])[i] +
la_local[x ][y + 1][z ] * (&K_la_converted[1][0][1][1][1][1][0][0])[i] +
mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][1][0][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][0][1][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][0][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][1][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z + 1][v];
// 0, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x ][y + 1][z + 1] * (&K_mu_converted[1][0][0][1][1][1][0][0])[i] +
la_local[x ][y + 1][z + 1] * (&K_la_converted[1][0][0][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][0][1][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][0][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 1][y + 2][z + 2][v];
// +1, -1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z] * (&K_mu_converted[0][1][1][1][0][0][0][0])[i] +
la_local[x + 1][y][z] * (&K_la_converted[0][1][1][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z][v];
// +1, -1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z ] * (&K_mu_converted[0][1][1][1][0][1][0][0])[i] +
la_local[x + 1][y][z ] * (&K_la_converted[0][1][1][1][0][1][0][0])[i] +
mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][1][0][0][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z + 1][v];
// +1, -1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y][z + 1] * (&K_mu_converted[0][1][0][1][0][1][0][0])[i] +
la_local[x + 1][y][z + 1] * (&K_la_converted[0][1][0][1][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y][z + 2][v];
// +1, 0, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z] * (&K_mu_converted[0][1][1][1][1][0][0][0])[i] +
la_local[x + 1][y ][z] * (&K_la_converted[0][1][1][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][1][0][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z][v];
// +1, 0, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z ] * (&K_mu_converted[0][1][1][1][1][1][0][0])[i] +
la_local[x + 1][y ][z ] * (&K_la_converted[0][1][1][1][1][1][0][0])[i] +
mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][1][1][0][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][1][1][0][0][0])[i] +
mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][1][0][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][1][0][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][0][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][0][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z + 1][v];
// +1, 0, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y ][z + 1] * (&K_mu_converted[0][1][0][1][1][1][0][0])[i] +
la_local[x + 1][y ][z + 1] * (&K_la_converted[0][1][0][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][0][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][0][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 1][z + 2][v];
// +1, +1, -1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z] * (&K_mu_converted[0][0][1][1][1][0][0][0])[i] +
la_local[x + 1][y + 1][z] * (&K_la_converted[0][0][1][1][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z][v];
// +1, +1, 0 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z ] * (&K_mu_converted[0][0][1][1][1][1][0][0])[i] +
la_local[x + 1][y + 1][z ] * (&K_la_converted[0][0][1][1][1][1][0][0])[i] +
mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][1][0][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][1][0][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z + 1][v];
// +1, +1, +1 stencil
for(int i = 0; i < 9;++i)
K_tmp[i] = mu_local[x + 1][y + 1][z + 1] * (&K_mu_converted[0][0][0][1][1][1][0][0])[i] +
la_local[x + 1][y + 1][z + 1] * (&K_la_converted[0][0][0][1][1][1][0][0])[i];
for(int w = 0; w < d;++w)
for(int v = 0; v < d;++v)
f_node[w] += m[w][v] * u_local[x + 2][y + 2][z + 2][v];
for(int v = 0;v < d;++v)
reinterpret_cast<T*>((unsigned long)para.f[v] + (unsigned long)block_index[13])[entry] = f_node[v] * para.dx;
}
}
//#####################################################################
// Constructor 3D
//#####################################################################
template <class T, int log2_struct,class T_offset_ptr>
Linear_Elasticity_CUDA_Optimized<T,log2_struct,3,T_offset_ptr>::Linear_Elasticity_CUDA_Optimized(T* const f_input[d],const T* const u_input[d],
const T* const mu_input,const T* const lambda_input,
const T_offset_ptr* const b_input,const int size_input,
const T dx_input)
:mu(mu_input),lambda(lambda_input),b(b_input),size(size_input),dx(dx_input)
{
for(int v=0;v<d;++v){f[v]=f_input[v];u[v]=u_input[v];}
}
//#####################################################################
// Function Run
//#####################################################################
template <class T,int log2_struct,class T_offset_ptr>
void Linear_Elasticity_CUDA_Optimized<T,log2_struct,3,T_offset_ptr>::Run()
{
if(!symbol_initialized){
cudaMemcpyToSymbol(K_mu_device,&K_mu[0][0][0][0],576*sizeof(float),0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(K_la_device,&K_lambda[0][0][0][0],576*sizeof(float),0,cudaMemcpyHostToDevice);
symbol_initialized = true;}
cudaDeviceSynchronize();
    //std::cout << "Block size: " << block_xsize << " * " << block_ysize << " * " << block_zsize << std::endl;
int number_of_cuda_blocks = size;
if(number_of_cuda_blocks == 0) return;
Parameters p;
for(int v=0;v<d;++v){
p.f[v] = f[v];
p.u[v] = u[v];}
p.mu = mu;
p.lambda = lambda;
p.b = b;
p.number_of_blocks = size;
p.dx = dx;
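    // Kernel inputs are passed through the constant-memory Parameters copy (p_device)
    // rather than as launch arguments; one thread block is launched per SPGrid block.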
cudaMemcpyToSymbol(p_device,(void*)&p,sizeof(Parameters),0,cudaMemcpyHostToDevice);
auto cudaerror = cudaGetLastError();
if(cudaSuccess!=cudaerror){
std::cerr<<"CUDA ERROR: "<<cudaGetErrorString(cudaerror)<<std::endl;abort();}
Linear_Elasticity_Kernel_3D<T,T_offset_ptr,block_xsize,block_ysize,block_zsize>
<<<number_of_cuda_blocks,THREADBLOCK,0>>>();
#if 0
cudaDeviceSynchronize();
cudaerror = cudaGetLastError();
if(cudaSuccess!=cudaerror){
std::cerr<<"CUDA ERROR: "<<cudaGetErrorString(cudaerror)<<std::endl;abort();}
#endif
}
//#####################################################################################################
template class Linear_Elasticity_CUDA_Optimized<float,3,3,unsigned int>;
|
2f21ba1a15f7362d483cdcc6d82c1f7c6a287793.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
#include <c10/hip/HIPException.h>
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, data),
scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
C10_HIP_KERNEL_LAUNCH_CHECK();
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
#endif
#endif
#endif
| 2f21ba1a15f7362d483cdcc6d82c1f7c6a287793.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
#include <c10/cuda/CUDAException.h>
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(THCTensor_(data)(state, data),
scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
#endif
#endif
#endif
|
2c549c3eddbf023516729e8f2ff862669fe757d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF32F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
float,
4,
float,
float>;
__global__ void epilogue_thread_functor_planar_complex_f32_f32(
float *output_ptr,
float const *accum_ptr,
float const *source_ptr,
typename FunctorPlanarComplexF32F32::Params params) {
FunctorPlanarComplexF32F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4> const *>(source_ptr);
*reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4>*>(output_ptr) = linear_combination_op(accum, source);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f32) {
using Element = float;
using ElementOutput = float;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
float,
float>;
__global__ void epilogue_thread_functor_planar_complex_f16_f32(
cutlass::half_t *output_ptr,
float const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F32::Params params,
int N) {
FunctorPlanarComplexF16F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f32) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F16 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
cutlass::half_t,
cutlass::half_t>;
__global__ void epilogue_thread_functor_planar_complex_f16_f16(
cutlass::half_t *output_ptr,
cutlass::half_t const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F16::Params params,
int N) {
FunctorPlanarComplexF16F16 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f16) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 2c549c3eddbf023516729e8f2ff862669fe757d3.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF32F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
float,
4,
float,
float>;
__global__ void epilogue_thread_functor_planar_complex_f32_f32(
float *output_ptr,
float const *accum_ptr,
float const *source_ptr,
typename FunctorPlanarComplexF32F32::Params params) {
FunctorPlanarComplexF32F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4> const *>(source_ptr);
*reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4>*>(output_ptr) = linear_combination_op(accum, source);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f32) {
using Element = float;
using ElementOutput = float;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
float,
float>;
__global__ void epilogue_thread_functor_planar_complex_f16_f32(
cutlass::half_t *output_ptr,
float const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F32::Params params,
int N) {
FunctorPlanarComplexF16F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f32) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F16 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
cutlass::half_t,
cutlass::half_t>;
__global__ void epilogue_thread_functor_planar_complex_f16_f16(
cutlass::half_t *output_ptr,
cutlass::half_t const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F16::Params params,
int N) {
FunctorPlanarComplexF16F16 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f16) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
ae4f212a09d832d333baaeb9bc8acc98cade76a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <string>
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "iterator_kernel.cuh"
#include "image_output.h"
#include "text_display.cuh"
using namespace std;
int main(int argc, char** argv)
{
uint2 res = { 1028u * 4, 1028u * 4 };
unsigned char* M;
int* blackDepth;
int* nextMaxIter;
int tempBlackDepth;
hipMallocManaged(&blackDepth, sizeof(int));
hipMallocManaged(&nextMaxIter, sizeof(int));
hipMallocManaged(&M, res.x * res.y * sizeof(unsigned char));
*nextMaxIter = 255; // Sets the initial iteration cap to 255
// Run kernel on whole image
//dim3 blockSize(32, 8);
//dim3 numBlocks((xRes + blockSize.x - 1) / blockSize.x, (yRes + blockSize.y - 1) / blockSize.y);
int blockSize = 256;
int numBlocks = 256;
double2 zoomCenter = { -0.7999075073, -0.16746163399 };
double2 zoomBounds = { 1.5, 0.000002 }; // vertical radius at start of zoom, vertical radius at end of zoom
double aspectRatio = 1.; // horizontal radius / vertical radius
double radius = zoomBounds.x;
int animLength = 400;
double shrinkFactor = pow(zoomBounds.y / zoomBounds.x, 1. / animLength); // Factor by which the vertical radius of the image decreases each frame
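// Check on the zoom schedule: after n frames the vertical radius is
// zoomBounds.x * pow(shrinkFactor, n), so at n == animLength it lands on
// zoomBounds.x * (zoomBounds.y / zoomBounds.x) = zoomBounds.y, the target end radius.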
for (int i = 0; i < animLength; i++) {
double4 bounds = { zoomCenter.x - radius* aspectRatio, zoomCenter.x + radius* aspectRatio,
zoomCenter.y - radius, zoomCenter.y + radius }; // xmin, xmax, ymin, ymax
radius *= shrinkFactor;
printf("Frame number %d , Max iteration cap:%d ", i, *nextMaxIter);
tempBlackDepth = *blackDepth;
*blackDepth = 10000000;
std::cout << shrinkFactor;
// Launches the kernel
mandleBrot << <numBlocks, blockSize >> > (res, bounds, tempBlackDepth, *nextMaxIter * 1.05, M, blackDepth, nextMaxIter);
hipDeviceSynchronize();
// File and console output
writePGM(res.x, res.y, (char*)M, ".\\renders\\new_" + to_string(i));
displayGreyscale(M, res.x, res.y, 4*40, 4 * 20);
}
// Free device memory
hipFree(M);
hipFree(blackDepth);
hipFree(nextMaxIter);
return 0;
} | ae4f212a09d832d333baaeb9bc8acc98cade76a9.cu | #include <stdio.h>
#include <math.h>
#include <string>
#include <iostream>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "iterator_kernel.cuh"
#include "image_output.h"
#include "text_display.cuh"
using namespace std;
int main(int argc, char** argv)
{
uint2 res = { 1028u * 4, 1028u * 4 };
unsigned char* M;
int* blackDepth;
int* nextMaxIter;
int tempBlackDepth;
cudaMallocManaged(&blackDepth, sizeof(int));
cudaMallocManaged(&nextMaxIter, sizeof(int));
cudaMallocManaged(&M, res.x * res.y * sizeof(unsigned char));
*nextMaxIter = 255; // Sets the initial iteration cap to 255
// Run kernel on whole image
//dim3 blockSize(32, 8);
//dim3 numBlocks((xRes + blockSize.x - 1) / blockSize.x, (yRes + blockSize.y - 1) / blockSize.y);
int blockSize = 256;
int numBlocks = 256;
double2 zoomCenter = { -0.7999075073, -0.16746163399 };
double2 zoomBounds = { 1.5, 0.000002 }; // vertical radius at start of zoom, vertical radius at end of zoom
double aspectRatio = 1.; // horizontal radius / vertical radius
double radius = zoomBounds.x;
int animLength = 400;
double shrinkFactor = pow(zoomBounds.y / zoomBounds.x, 1. / animLength); // Factor by which the vertical radius of the image decreases each frame
for (int i = 0; i < animLength; i++) {
double4 bounds = { zoomCenter.x - radius* aspectRatio, zoomCenter.x + radius* aspectRatio,
zoomCenter.y - radius, zoomCenter.y + radius }; // xmin, xmax, ymin, ymax
radius *= shrinkFactor;
printf("Frame number %d , Max iteration cap:%d ", i, *nextMaxIter);
tempBlackDepth = *blackDepth;
*blackDepth = 10000000;
std::cout << shrinkFactor;
// Launches the kernel
mandleBrot << <numBlocks, blockSize >> > (res, bounds, tempBlackDepth, *nextMaxIter * 1.05, M, blackDepth, nextMaxIter);
cudaDeviceSynchronize();
// File and console output
writePGM(res.x, res.y, (char*)M, ".\\renders\\new_" + to_string(i));
displayGreyscale(M, res.x, res.y, 4*40, 4 * 20);
}
// Free device memory
cudaFree(M);
cudaFree(blackDepth);
cudaFree(nextMaxIter);
return 0;
} |
77b3429db6260cbbe95c9a6233ee42183e5a5f62.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
| 77b3429db6260cbbe95c9a6233ee42183e5a5f62.cu | #include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
|
44aea5e179efebac9838ffc900d16eff0364438d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased)));
}
accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THTensor_wrap(self).mean().item<accreal>();
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 1 : 0)))
);
THCudaCheck(hipGetLastError());
return val;
}
#endif
#endif
#endif
| 44aea5e179efebac9838ffc900d16eff0364438d.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError_t errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased)));
}
accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THTensor_wrap(self).mean().item<accreal>();
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 1 : 0)))
);
THCudaCheck(cudaGetLastError());
return val;
}
#endif
#endif
#endif
|
35511d69fdac939c980084d1936a91279ea042b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ComputeHistogramKernel( float *globalMemData, int *globalHist )
{
//the kernel should be only 1D
int globalThreadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
int localThreadId = threadIdx.x;
extern __shared__ int partialHist[];
if(localThreadId < D_BINS)
{
//set the partial histogram in shared memory to zero
partialHist[localThreadId] = 0;
}
__syncthreads();
//if the global thread id is within bounds of the data array size
if(globalThreadId < D_MEMORY_BLOCK_SIZE)
{
//copy the global data to local memory
float myLocalDataValue = globalMemData[globalThreadId];
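// Bin layout: bin 0 collects values below D_MIN_VALUE, bin D_BINS-1 collects values
// above D_MAX_VALUE, and in-range values map to bins 1..D_BINS-2 (a value exactly
// equal to D_MAX_VALUE is folded into the last in-range bin below).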
int binIdToWrite = 0 + (D_BINS - 1) * (myLocalDataValue > D_MAX_VALUE);
//if the local value is within limits
if(myLocalDataValue >= D_MIN_VALUE && myLocalDataValue <= D_MAX_VALUE)
{
float biasedValue = myLocalDataValue - D_MIN_VALUE;
binIdToWrite = (int)floor((double)(biasedValue/D_BIN_VALUE_WIDTH)) + 1;
if(myLocalDataValue == D_MAX_VALUE)
{
binIdToWrite = D_BINS - 2;
}
}
//write to local histogram
atomicAdd( &(partialHist[binIdToWrite]), 1);
__syncthreads();
if(localThreadId < D_BINS)
{
//copy values to global histogram
atomicAdd( &(globalHist[localThreadId]), partialHist[localThreadId]);
}
}
} | 35511d69fdac939c980084d1936a91279ea042b8.cu | #include "includes.h"
__global__ void ComputeHistogramKernel( float *globalMemData, int *globalHist )
{
//the kernel should be only 1D
int globalThreadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
int localThreadId = threadIdx.x;
extern __shared__ int partialHist[];
if(localThreadId < D_BINS)
{
//set the partial histogram in shared memory to zero
partialHist[localThreadId] = 0;
}
__syncthreads();
//if the global thread id is within bounds of the data array size
if(globalThreadId < D_MEMORY_BLOCK_SIZE)
{
//copy the global data to local memory
float myLocalDataValue = globalMemData[globalThreadId];
int binIdToWrite = 0 + (D_BINS - 1) * (myLocalDataValue > D_MAX_VALUE);
//if the local value is within limits
if(myLocalDataValue >= D_MIN_VALUE && myLocalDataValue <= D_MAX_VALUE)
{
float biasedValue = myLocalDataValue - D_MIN_VALUE;
binIdToWrite = (int)floor((double)(biasedValue/D_BIN_VALUE_WIDTH)) + 1;
if(myLocalDataValue == D_MAX_VALUE)
{
binIdToWrite = D_BINS - 2;
}
}
//write to local histogram
atomicAdd( &(partialHist[binIdToWrite]), 1);
__syncthreads();
if(localThreadId < D_BINS)
{
//copy values to global histogram
atomicAdd( &(globalHist[localThreadId]), partialHist[localThreadId]);
}
}
} |
5330723c5b14f3aa30757ec39bba6a8bcc9bd0fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MIT License
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <stdio.h>
#include <hip/hip_host_runtime_api.h>
#define N 3
#define M 3
#define P 3
__global__
void matrixMul(int *matrixA, int *matrixB, int *matrixC,
int ARows, int ACols, int BCols )
{
int i = blockIdx.x;
int j = blockIdx.y;
if (i < ARows && j < BCols)
{
int value = 0;
for (int k = 0; k < ACols; ++k)
{
value += matrixA[i*ACols+k] * matrixB[k*BCols+j];
}
matrixC[i*BCols+j] = value;
}
}
void printMatrix(int *matrix, int Rows, int Cols)
{
for (int i = 0; i < Rows; ++i)
{
printf("\n[");
bool first = true;
for (int j = 0; j < Cols; ++j)
{
if (first)
{
printf("%d", matrix[i*Cols+j]);
first = false;
}
else
{
printf(", %d", matrix[i*Cols+j]);
}
}
printf("]");
}
}
void printHipError(hipError_t error)
{
printf("Hip Error: %s\n", hipGetErrorString(error));
}
void randomizeMatrix(int *matrix, int Rows, int Cols)
{
for (int i = 0; i < Rows*Cols; ++i)
matrix[i] = rand() % 10;
}
void clearMatrix(int *matrix, int Rows, int Cols )
{
for (int i = 0; i < Rows*Cols; ++i)
matrix[i] = 0;
}
bool hipCallSuccessful(hipError_t error)
{
if (error != hipSuccess)
printHipError(error);
return error == hipSuccess;
}
bool deviceCanCompute(int deviceID)
{
bool canCompute = false;
hipDeviceProp_t deviceProp;
bool devicePropIsAvailable =
hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID));
if (devicePropIsAvailable)
{
canCompute = deviceProp.computeMode != hipComputeModeProhibited;
if (!canCompute)
printf("Compute mode is prohibited\n");
}
return canCompute;
}
bool deviceIsAvailable(int *deviceID)
{
return hipCallSuccessful(hipGetDevice(deviceID));
}
// We always use device 0
bool haveComputeDevice()
{
int deviceID = 0;
return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID);
}
int main()
{
int hostSrcMatA[N*M];
int hostSrcMatB[M*P];
int hostDstMat[N*P];
if (!haveComputeDevice())
{
printf("No compute device available\n");
return 0;
}
randomizeMatrix(hostSrcMatA, N, M);
randomizeMatrix(hostSrcMatB, M, P);
clearMatrix(hostDstMat, N, P);
printf("A: ");
printMatrix(hostSrcMatA, N, M);
printf("\nB: ");
printMatrix(hostSrcMatB, M ,P);
printf("\n");
int *deviceSrcMatA = NULL;
int *deviceSrcMatB = NULL;
int *deviceDstMat = NULL;
bool matrixAAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceSrcMatA, N*M*sizeof(int)));
bool matrixBAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceSrcMatB, M*P*sizeof(int)));
bool matrixCAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceDstMat, N*P*sizeof(int)));
if (matrixAAllocated && matrixBAllocated && matrixCAllocated)
{
bool copiedSrcMatA =
hipCallSuccessful(hipMemcpy(deviceSrcMatA, hostSrcMatA,
N*M*sizeof(int),
hipMemcpyHostToDevice));
bool copiedSrcMatB =
hipCallSuccessful(hipMemcpy(deviceSrcMatB, hostSrcMatB,
M*P*sizeof(int),
hipMemcpyHostToDevice));
if (copiedSrcMatA && copiedSrcMatB)
{
dim3 dimGrid(N,P);
hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(1), 0, 0, deviceSrcMatA, deviceSrcMatB, deviceDstMat,
N, M, P);
if (hipCallSuccessful(hipMemcpy(hostDstMat,
deviceDstMat,
N*P*sizeof(int),
hipMemcpyDeviceToHost)))
{
printf("Mul: ");
printMatrix(hostDstMat, N, P);
printf("\n");
}
else
{
printf("Unable to copy memory from device to host\n");
}
}
}
if (matrixAAllocated)
hipFree(deviceSrcMatA);
if (matrixBAllocated)
hipFree(deviceSrcMatB);
if (matrixCAllocated)
hipFree(deviceDstMat);
return 0;
}
| 5330723c5b14f3aa30757ec39bba6a8bcc9bd0fc.cu | // MIT License
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <stdio.h>
#include <hip/hip_host_runtime_api.h>
#define N 3
#define M 3
#define P 3
__global__
void matrixMul(int *matrixA, int *matrixB, int *matrixC,
int ARows, int ACols, int BCols )
{
int i = blockIdx.x;
int j = blockIdx.y;
if (i < ARows && j < BCols)
{
int value = 0;
for (int k = 0; k < ACols; ++k)
{
value += matrixA[i*ACols+k] * matrixB[k*BCols+j];
}
matrixC[i*BCols+j] = value;
}
}
void printMatrix(int *matrix, int Rows, int Cols)
{
for (int i = 0; i < Rows; ++i)
{
printf("\n[");
bool first = true;
for (int j = 0; j < Cols; ++j)
{
if (first)
{
printf("%d", matrix[i*Cols+j]);
first = false;
}
else
{
printf(", %d", matrix[i*Cols+j]);
}
}
printf("]");
}
}
void printHipError(hipError_t error)
{
printf("Hip Error: %s\n", hipGetErrorString(error));
}
void randomizeMatrix(int *matrix, int Rows, int Cols)
{
for (int i = 0; i < Rows*Cols; ++i)
matrix[i] = rand() % 10;
}
void clearMatrix(int *matrix, int Rows, int Cols )
{
for (int i = 0; i < Rows*Cols; ++i)
matrix[i] = 0;
}
bool hipCallSuccessful(hipError_t error)
{
if (error != hipSuccess)
printHipError(error);
return error == hipSuccess;
}
bool deviceCanCompute(int deviceID)
{
bool canCompute = false;
hipDeviceProp_t deviceProp;
bool devicePropIsAvailable =
hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID));
if (devicePropIsAvailable)
{
canCompute = deviceProp.computeMode != hipComputeModeProhibited;
if (!canCompute)
printf("Compute mode is prohibited\n");
}
return canCompute;
}
bool deviceIsAvailable(int *deviceID)
{
return hipCallSuccessful(hipGetDevice(deviceID));
}
// We always use device 0
bool haveComputeDevice()
{
int deviceID = 0;
return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID);
}
int main()
{
int hostSrcMatA[N*M];
int hostSrcMatB[M*P];
int hostDstMat[N*P];
if (!haveComputeDevice())
{
printf("No compute device available\n");
return 0;
}
randomizeMatrix(hostSrcMatA, N, M);
randomizeMatrix(hostSrcMatB, M, P);
clearMatrix(hostDstMat, N, P);
printf("A: ");
printMatrix(hostSrcMatA, N, M);
printf("\nB: ");
printMatrix(hostSrcMatB, M ,P);
printf("\n");
int *deviceSrcMatA = NULL;
int *deviceSrcMatB = NULL;
int *deviceDstMat = NULL;
bool matrixAAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceSrcMatA, N*M*sizeof(int)));
bool matrixBAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceSrcMatB, M*P*sizeof(int)));
bool matrixCAllocated =
hipCallSuccessful(hipMalloc((void **)&deviceDstMat, N*P*sizeof(int)));
if (matrixAAllocated && matrixBAllocated && matrixCAllocated)
{
bool copiedSrcMatA =
hipCallSuccessful(hipMemcpy(deviceSrcMatA, hostSrcMatA,
N*M*sizeof(int),
hipMemcpyHostToDevice));
bool copiedSrcMatB =
hipCallSuccessful(hipMemcpy(deviceSrcMatB, hostSrcMatB,
M*P*sizeof(int),
hipMemcpyHostToDevice));
if (copiedSrcMatA && copiedSrcMatB)
{
dim3 dimGrid(N,P);
matrixMul<<<dimGrid, 1>>>(deviceSrcMatA, deviceSrcMatB, deviceDstMat,
N, M, P);
if (hipCallSuccessful(hipMemcpy(hostDstMat,
deviceDstMat,
N*P*sizeof(int),
hipMemcpyDeviceToHost)))
{
printf("Mul: ");
printMatrix(hostDstMat, N, P);
printf("\n");
}
else
{
printf("Unable to copy memory from device to host\n");
}
}
}
if (matrixAAllocated)
hipFree(deviceSrcMatA);
if (matrixBAllocated)
hipFree(deviceSrcMatB);
if (matrixCAllocated)
hipFree(deviceDstMat);
return 0;
}
|
57a2d9214c8beb9c9ce982f7538020fab4ad2904.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#define MAX(a, b) (((a) > (b)) ? (a): (b))
#define MIN(a, b) (((a) < (b)) ? (a): (b))
#define BLOCK_DIM 32
#define FILTER_SIZE 9
#include "utils.h"
#include <algorithm>
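// A small host-side helper for spot-checking a single blurred pixel: it follows
// the same recipe as the worked example above (clamp neighbor coordinates to the
// image bounds, multiply each weight with the pixel under it, sum in floating
// point). It is only a reference sketch for debugging and is not called anywhere
// by the assignment code.
inline float referenceBlurPixel(const unsigned char* const channel,
                                int numRows, int numCols,
                                int row, int col,
                                const float* const filter, int filterWidth)
{
  float sum = 0.0f;
  for (int fy = -filterWidth / 2; fy <= filterWidth / 2; ++fy) {
    for (int fx = -filterWidth / 2; fx <= filterWidth / 2; ++fx) {
      int r = MIN(MAX(row + fy, 0), numRows - 1);   // clamp to the image bounds
      int c = MIN(MAX(col + fx, 0), numCols - 1);
      float weight = filter[(fy + filterWidth / 2) * filterWidth + (fx + filterWidth / 2)];
      sum += weight * static_cast<float>(channel[r * numCols + c]);
    }
  }
  return sum;
}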
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if ( idx >= numCols || idy >= numRows )
{
return;
}
float sum = 0.0;
for (int filter_y = -filterWidth/2; filter_y <= filterWidth/2; ++filter_y) {
for (int filter_x = -filterWidth/2; filter_x <= filterWidth/2; ++filter_x) {
int image_vert_clamp = MIN(MAX(filter_y + idy, 0), static_cast<int>(numRows - 1));
int image_horiz_clamp = MIN(MAX(filter_x + idx, 0), static_cast<int>(numCols - 1));
float img_value = static_cast<float>(inputChannel[image_vert_clamp * numCols + image_horiz_clamp]);
float filter_value = filter[(filterWidth/2 + filter_y)*filterWidth + (filterWidth/2 + filter_x)];
sum += img_value * filter_value;
}
}
outputChannel[idy * numCols + idx] = sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if ( idx >= numCols || idy >= numRows )
{
return;
}
uchar4 pixel = inputImageRGBA[idy * numCols + idx];
redChannel[idy * numCols + idx] = pixel.x;
greenChannel[idy * numCols + idx] = pixel.y;
blueChannel[idy * numCols + idx] = pixel.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32,32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / blockSize.x + 1, numRows / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
//Separate into the d_red/d_green/d_blue buffers allocated above so the blur can read
//from one buffer and write to another; blurring in place would let threads read
//neighbor pixels that other threads have already overwritten.
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 57a2d9214c8beb9c9ce982f7538020fab4ad2904.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
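// For example, with a uchar4 AoS image the SoA split for pixel i is just:
//   red[i] = rgba[i].x; green[i] = rgba[i].y; blue[i] = rgba[i].z;
// (the alpha channel rgba[i].w is dropped here and re-added as 255 when recombining).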
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
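// In other words, for the pixel at column c and row r and an odd filterWidth w,
//   blurred(c, r) = sum over fy, fx in [-w/2, w/2] of
//                   filter[(fy + w/2)*w + (fx + w/2)] * input[clampRow(r + fy)*numCols + clampCol(c + fx)]
// where clampRow/clampCol keep the neighbor coordinates inside [0, numRows-1] and [0, numCols-1].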
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#define MAX(a, b) (((a) > (b)) ? (a): (b))
#define MIN(a, b) (((a) < (b)) ? (a): (b))
#define BLOCK_DIM 32
#define FILTER_SIZE 9
#include "utils.h"
#include <algorithm>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if ( idx >= numCols || idy >= numRows )
{
return;
}
float sum = 0.0;
for (int filter_y = -filterWidth/2; filter_y <= filterWidth/2; ++filter_y) {
for (int filter_x = -filterWidth/2; filter_x <= filterWidth/2; ++filter_x) {
// cast the unsigned thread coordinates to int so that negative filter offsets clamp to 0
// instead of wrapping around to large unsigned values
int image_vert_clamp = MIN(MAX(filter_y + static_cast<int>(idy), 0), static_cast<int>(numRows - 1));
int image_horiz_clamp = MIN(MAX(filter_x + static_cast<int>(idx), 0), static_cast<int>(numCols - 1));
float img_value = static_cast<float>(inputChannel[image_vert_clamp * numCols + image_horiz_clamp]);
float filter_value = filter[(filterWidth/2 + filter_y)*filterWidth + (filterWidth/2 + filter_x)];
sum += img_value * filter_value;
}
}
outputChannel[idy * numCols + idx] = sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if ( idx >= numCols || idy >= numRows )
{
return;
}
uchar4 pixel = inputImageRGBA[idy * numCols + idx];
redChannel[idy * numCols + idx] = pixel.x;
greenChannel[idy * numCols + idx] = pixel.y;
blueChannel[idy * numCols + idx] = pixel.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32,32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / blockSize.x + 1, numRows / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
//Separate into the d_red/d_green/d_blue buffers allocated above so the blur can read
//from one buffer and write to another; blurring in place would let threads read
//neighbor pixels that other threads have already overwritten.
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
fd0cc51c19e3d8d58ebf9f88a54c90ecab1a3ca7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
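// For the default Equihash (200,9) build with RESTBITS=4 (assuming DIGITBITS = WN/(WK+1) = 20
// as defined in ../cpu_tromp/equi.h) the above work out to BUCKBITS=16, NBUCKETS=65536,
// SLOTBITS=6, NSLOTS=64 slots per bucket and NRESTS=16 possible xhash values.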
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
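// With XINTREE and the default (200,9)/RESTBITS=4 parameters this packs, from the most
// significant bit down: [bucketid:BUCKBITS=16][slot0:SLOTBITS=6][slot1:SLOTBITS=6][xhash:RESTBITS=4],
// i.e. exactly 32 bits; without XINTREE the xhash field is omitted.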
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
#ifdef XINTREE
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> (SLOTBITS + RESTBITS)) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ void orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
}
__device__ void listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
}
__device__ void listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
listindices1(buck[t.slotid0()].attr, indices);
listindices1(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
listindices2(buck[t.slotid0()].attr, indices);
listindices2(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
listindices3(buck[t.slotid0()].attr, indices);
listindices3(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
listindices4(buck[t.slotid0()].attr, indices);
listindices4(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
listindices5(buck[t.slotid0()].attr, indices);
listindices5(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
listindices6(buck[t.slotid0()].attr, indices);
listindices6(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
listindices7(buck[t.slotid0()].attr, indices);
listindices7(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
listindices8(buck[t.slotid0()].attr, indices);
listindices8(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
listindices9(t, prf);
#elif WK==5
listindices5(t, prf);
#else
#error not implemented
#endif
if (probdupe(prf))
return;
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// proper dupe test is a little costly on GPU, so allow false negatives
__device__ bool probdupe(u32 *prf) {
unsigned short susp[PROOFSIZE];
memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
for (u32 i=0; i<PROOFSIZE; i++) {
u32 bin = prf[i] & (PROOFSIZE-1);
unsigned short msb = prf[i]>>WK;
if (msb == susp[bin])
return true;
susp[bin] = msb;
}
return false;
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
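// Per-bucket collision finder: for each xhash value it keeps a singly linked list of slot
// indices (list heads in xhashslots[], links in nextxhashslot[]), or a 64-slot occupancy
// bitmap when XBITMAP is defined; addslot() pushes a slot and nextcollision()/slot() then
// walk all earlier slots in the bucket that share the same xhash.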
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
return true;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
return true;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
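// Digit 0: hash NBLOCKS blake2b blocks, split each output into HASHESPERBLAKE equihash hashes,
// and file every hash into the bucket given by its leading BUCKBITS bits (keeping the next
// RESTBITS bits in the tree node when XINTREE is defined).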
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
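// digitO / digitE handle the generic odd / even rounds r: within each bucket of the previous
// layer they collect slots whose next RESTBITS bits match, pair them up, XOR the remaining
// hash words, and store the pair (as a tree node) in the bucket selected by the next BUCKBITS
// bits of the XOR.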
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi)));
}
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(hipFree(eq->nslots));
checkCudaErrors(hipFree(eq->sols));
checkCudaErrors(hipFree(eq->hta.trees0[0]));
checkCudaErrors(hipFree(eq->hta.trees1[0]));*/
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
free(sol_memory);
delete eq;
}
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(hipSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
} | fd0cc51c19e3d8d58ebf9f88a54c90ecab1a3ca7.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
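// For the default Equihash (200,9) build with RESTBITS=4 (assuming DIGITBITS = WN/(WK+1) = 20
// as defined in ../cpu_tromp/equi.h) the above work out to BUCKBITS=16, NBUCKETS=65536,
// SLOTBITS=6, NSLOTS=64 slots per bucket and NRESTS=16 possible xhash values.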
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
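// With XINTREE and the default (200,9)/RESTBITS=4 parameters this packs, from the most
// significant bit down: [bucketid:BUCKBITS=16][slot0:SLOTBITS=6][slot1:SLOTBITS=6][xhash:RESTBITS=4],
// i.e. exactly 32 bits; without XINTREE the xhash field is omitted.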
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
#ifdef XINTREE
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> (SLOTBITS + RESTBITS)) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ void orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
}
__device__ void listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
}
__device__ void listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
listindices1(buck[t.slotid0()].attr, indices);
listindices1(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
listindices2(buck[t.slotid0()].attr, indices);
listindices2(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
listindices3(buck[t.slotid0()].attr, indices);
listindices3(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
listindices4(buck[t.slotid0()].attr, indices);
listindices4(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
listindices5(buck[t.slotid0()].attr, indices);
listindices5(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
listindices6(buck[t.slotid0()].attr, indices);
listindices6(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
listindices7(buck[t.slotid0()].attr, indices);
listindices7(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
listindices8(buck[t.slotid0()].attr, indices);
listindices8(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
listindices9(t, prf);
#elif WK==5
listindices5(t, prf);
#else
#error not implemented
#endif
if (probdupe(prf))
return;
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// proper dupe test is a little costly on GPU, so allow false negatives
__device__ bool probdupe(u32 *prf) {
unsigned short susp[PROOFSIZE];
memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
for (u32 i=0; i<PROOFSIZE; i++) {
u32 bin = prf[i] & (PROOFSIZE-1);
unsigned short msb = prf[i]>>WK;
if (msb == susp[bin])
return true;
susp[bin] = msb;
}
return false;
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
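// Per-bucket collision finder: for each xhash value it keeps a singly linked list of slot
// indices (list heads in xhashslots[], links in nextxhashslot[]), or a 64-slot occupancy
// bitmap when XBITMAP is defined; addslot() pushes a slot and nextcollision()/slot() then
// walk all earlier slots in the bucket that share the same xhash.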
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
return true;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
return true;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
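// Digit 0: hash NBLOCKS blake2b blocks, split each output into HASHESPERBLAKE equihash hashes,
// and file every hash into the bucket given by its leading BUCKBITS bits (keeping the next
// RESTBITS bits in the tree node when XINTREE is defined).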
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
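// digitO: generic odd round r. Reads the previous round's slots from trees0, enumerates pairs
// whose rest bits collide (skipping pairs whose hashes are fully equal), XORs their hashes, and
// writes the shortened result into trees1 under the bucket selected by the next BUCKBITS.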
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
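// digitE: generic even round r, the mirror image of digitO (reads trees1, writes trees0).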
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
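// digit_1..digit8: hand-unrolled rounds 1-8 of digitO/digitE for the BUCKBITS=16, RESTBITS=4,
// XINTREE layout (see the #if in solve()), using __byte_perm on whole hash words to extract
// bucket ids and rest bits instead of per-byte indexing.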
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
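// digitK (final round): any pair in the same bucket whose rest bits collide and whose remaining
// hash word matches is recorded as a candidate solution via equi::candidate().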
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
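// eq_cuda_context: allocates page-aligned host solution storage plus the device heaps, slot
// counters and solution buffer, mapping even-round trees into heap0 and odd-round trees into heap1.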
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi)));
}
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(cudaFree(eq->nslots));
checkCudaErrors(cudaFree(eq->sols));
checkCudaErrors(cudaFree(eq->hta.trees0[0]));
checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
free(sol_memory);
delete eq;
}
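// solve(): installs the header+nonce into the blake2b state, copies the solver state to the device,
// launches digitH, the intermediate rounds (unrolled when available, otherwise generic digitO/digitE)
// and digitK while checking cancelf() between launches, then copies the proofs back and hands each
// one to solutionf().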
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(cudaSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
} |
ba9d96cd91249de3c0896be3459f7dedf080574e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calcSumTable.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
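// Benchmark harness: for every matrix size / block shape combination, pad the launch grid to cover
// the matrix, warm up calcSumTable, then time 1000 back-to-back launches. Note there is no device
// sync inside the timed region, so the printed figure mostly reflects launch/enqueue overhead.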
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *rowCumSum = NULL;
hipMalloc(&rowCumSum, XSIZE*YSIZE*sizeof(float)); // size in bytes for XSIZE*YSIZE floats
float *SumTable = NULL;
hipMalloc(&SumTable, XSIZE*YSIZE*sizeof(float));
int rowNumberN = 1;
int colNumberM = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(calcSumTable, dim3(gridBlock), dim3(threadBlock), 0, 0, rowCumSum, SumTable, rowNumberN, colNumberM);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(calcSumTable, dim3(gridBlock), dim3(threadBlock), 0, 0, rowCumSum, SumTable, rowNumberN, colNumberM);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(calcSumTable, dim3(gridBlock), dim3(threadBlock), 0, 0, rowCumSum, SumTable, rowNumberN, colNumberM);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ba9d96cd91249de3c0896be3459f7dedf080574e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calcSumTable.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
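// Benchmark harness (CUDA counterpart of the hipified file above): sweep every matrix size /
// block shape pair, pad the grid to cover the matrix, warm up calcSumTable, then time 1000
// launches; no device sync inside the timed region, so this mostly measures launch overhead.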
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *rowCumSum = NULL;
cudaMalloc(&rowCumSum, XSIZE*YSIZE*sizeof(float)); // size in bytes for XSIZE*YSIZE floats
float *SumTable = NULL;
cudaMalloc(&SumTable, XSIZE*YSIZE*sizeof(float));
int rowNumberN = 1;
int colNumberM = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calcSumTable<<<gridBlock,threadBlock>>>(rowCumSum,SumTable,rowNumberN,colNumberM);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calcSumTable<<<gridBlock,threadBlock>>>(rowCumSum,SumTable,rowNumberN,colNumberM);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calcSumTable<<<gridBlock,threadBlock>>>(rowCumSum,SumTable,rowNumberN,colNumberM);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |