| hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
6a2c0a48fb7680085fd5208d85d832be60b430dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
__global__ void kernel1(float *dW, float *dWcurr, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dWcurr[id] = dW[id];
}
}
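// kernel2 performs one gradient-descent step of logistic regression: each thread owns one
// weight, accumulates err = sum_s (sigmoid(dWcurr . dX[s]) - dY[s]) * dX[s][start + id]
// over all samples, and then writes dW[id] = dWcurr[start + id] - alpha * err.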
__global__ void kernel2(float *dW, float *dWcurr, float *dX, float *dY, float alpha, int nSamples, int nFeatures, int start, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
float err = 0.0;
for (int s = 0; s < nSamples; s++) {
float arg = 0.0;
for (int f = 0; f < nFeatures; f++) {
arg += dWcurr[f] * dX[s * (nFeatures) + f];
}
float hypo = 1 / (1 + exp(-arg));
err += (hypo - dY[s]) * dX[s * (nFeatures) + start + id];
}
dW[id] = dWcurr[start + id] - alpha * err;
}
}
extern "C" {
void lrCUDA1(float *W, float *Wcurr, int start, int end, int GPUN) {
float *dW, *dWcurr;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In lrCUDA1\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(hipMalloc(&dW, sizeof(float) * GPUN));
CudaSafeCall(hipMalloc(&dWcurr, sizeof(float) * GPUN));
CudaSafeCall(hipMemcpy(dW, W + start, sizeof(float) * GPUN, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel1), dim3(ceil(((float)GPUN)/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, dW, dWcurr, GPUN);
CudaSafeCall(hipDeviceSynchronize());
CudaSafeCall(hipMemcpy(Wcurr + start, dWcurr, sizeof(float) * GPUN, hipMemcpyDeviceToHost));
CudaSafeCall(hipFree(dW));
CudaSafeCall(hipFree(dWcurr));
}
}
void lrCUDA2(float* X, float *Y, float *W, float *Wcurr, float alpha, int nSamples, int nFeatures, int start, int end, int GPUN) {
float *dX, *dY, *dW, *dWcurr;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In lrCUDA2\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(hipMalloc(&dX, sizeof(float) * nSamples * nFeatures));
CudaSafeCall(hipMalloc(&dY, sizeof(float) * nSamples));
CudaSafeCall(hipMalloc(&dWcurr, sizeof(float) * nFeatures));
CudaSafeCall(hipMalloc(&dW, sizeof(float) * GPUN));
CudaSafeCall(hipMemcpy(dX, X, sizeof(float) * nSamples * nFeatures, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dY, Y, sizeof(float) * nSamples, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dWcurr, Wcurr, sizeof(float) * nFeatures, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel2), dim3(ceil(((float)GPUN)/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, dW, dWcurr, dX, dY, alpha, nSamples, nFeatures, start-1, GPUN);
CudaSafeCall(hipDeviceSynchronize());
CudaSafeCall(hipMemcpy(W, dW, sizeof(float) * GPUN, hipMemcpyDeviceToHost));
CudaSafeCall(hipFree(dX));
CudaSafeCall(hipFree(dY));
CudaSafeCall(hipFree(dW));
CudaSafeCall(hipFree(dWcurr));
}
}
}
| 6a2c0a48fb7680085fd5208d85d832be60b430dc.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
__global__ void kernel1(float *dW, float *dWcurr, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dWcurr[id] = dW[id];
}
}
__global__ void kernel2(float *dW, float *dWcurr, float *dX, float *dY, float alpha, int nSamples, int nFeatures, int start, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
float err = 0.0;
for (int s = 0; s < nSamples; s++) {
float arg = 0.0;
for (int f = 0; f < nFeatures; f++) {
arg += dWcurr[f] * dX[s * (nFeatures) + f];
}
float hypo = 1 / (1 + exp(-arg));
err += (hypo - dY[s]) * dX[s * (nFeatures) + start + id];
}
dW[id] = dWcurr[start + id] - alpha * err;
}
}
extern "C" {
void lrCUDA1(float *W, float *Wcurr, int start, int end, int GPUN) {
float *dW, *dWcurr;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In lrCUDA1\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(cudaMalloc(&dW, sizeof(float) * GPUN));
CudaSafeCall(cudaMalloc(&dWcurr, sizeof(float) * GPUN));
CudaSafeCall(cudaMemcpy(dW, W + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
kernel1<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dW, dWcurr, GPUN);
CudaSafeCall(cudaDeviceSynchronize());
CudaSafeCall(cudaMemcpy(Wcurr + start, dWcurr, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaFree(dW));
CudaSafeCall(cudaFree(dWcurr));
}
}
void lrCUDA2(float* X, float *Y, float *W, float *Wcurr, float alpha, int nSamples, int nFeatures, int start, int end, int GPUN) {
float *dX, *dY, *dW, *dWcurr;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In lrCUDA2\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(cudaMalloc(&dX, sizeof(float) * nSamples * nFeatures));
CudaSafeCall(cudaMalloc(&dY, sizeof(float) * nSamples));
CudaSafeCall(cudaMalloc(&dWcurr, sizeof(float) * nFeatures));
CudaSafeCall(cudaMalloc(&dW, sizeof(float) * GPUN));
CudaSafeCall(cudaMemcpy(dX, X, sizeof(float) * nSamples * nFeatures, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(dY, Y, sizeof(float) * nSamples, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(dWcurr, Wcurr, sizeof(float) * nFeatures, cudaMemcpyHostToDevice));
kernel2<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dW, dWcurr, dX, dY, alpha, nSamples, nFeatures, start-1, GPUN);
CudaSafeCall(cudaDeviceSynchronize());
CudaSafeCall(cudaMemcpy(W, dW, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaFree(dX));
CudaSafeCall(cudaFree(dY));
CudaSafeCall(cudaFree(dW));
CudaSafeCall(cudaFree(dWcurr));
}
}
}
|
cgetf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgetf2.cu normal z -> c, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
#define cswap_bs 64
//#if (GPUSHMEM < 200)
#define cgeru_bs 512 // 512 is max threads for 1.x cards
//#else
//#define cgeru_bs 1024
//#endif
void magma_cswap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda);
/**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_cgesv_aux
********************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *ipiv,
magma_int_t* info )
{
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax(m-j, A(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of A since it is on GPU
//if ( A(jp, j) != 0.0) {
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cswap(n, A, j, jp, lda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru(m-j, n-j, A(j, j), lda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
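// A minimal host-side usage sketch of the routine above; dA, ldda, ipiv and info are
// placeholder names for an m x n device matrix, its leading dimension, the host pivot
// array (at least min(m,n) entries) and the status flag:
//
//     magma_int_t info;
//     magma_int_t ipiv[512];
//     magma_cgetf2_gpu( m, n, dA, ldda, ipiv, &info );
//     if      (info < 0) { /* argument -info had an illegal value */ }
//     else if (info > 0) { /* U(info,info) is exactly zero; the factor U is singular */ }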
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
int id = blockIdx.x * cswap_bs + threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
/*
cswap two row vectors: ith and jth
*/
dim3 threads(cswap_bs, 1, 1);
int num_blocks = (n - 1)/cswap_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_cswap), dim3(grid), dim3(threads), 0, magma_stream , n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
magmaFloatComplex *shared_y = shared_data;
int tid = blockIdx.x * cgeru_bs + threadIdx.x;
magmaFloatComplex reg = MAGMA_C_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda)
{
/*
Specialized kernel that merges the two kernels cscal and cgeru:
1) scale the first column vector A(1:M-1,0) by 1/A(0,0);
2) perform a cgeru operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
*/
dim3 threads(cgeru_bs, 1, 1);
int num_blocks = (m - 1)/cgeru_bs + 1;
dim3 grid(num_blocks,1);
size_t shared_size = sizeof(magmaFloatComplex)*(n);
hipLaunchKernelGGL(( kernel_cscal_cgeru), dim3(grid), dim3(threads), shared_size, magma_stream, m, n, A, lda);
}
| cgetf2.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgetf2.cu normal z -> c, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
#define cswap_bs 64
//#if (GPUSHMEM < 200)
#define cgeru_bs 512 // 512 is max threads for 1.x cards
//#else
//#define cgeru_bs 1024
//#endif
void magma_cswap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda);
/**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_cgesv_aux
********************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *ipiv,
magma_int_t* info )
{
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax(m-j, A(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of A since it is on GPU
//if ( A(jp, j) != 0.0) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cswap(n, A, j, jp, lda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru(m-j, n-j, A(j, j), lda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
int id = blockIdx.x * cswap_bs + threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
/*
cswap two row vectors: ith and jth
*/
dim3 threads(cswap_bs, 1, 1);
int num_blocks = (n - 1)/cswap_bs + 1;
dim3 grid(num_blocks,1);
kernel_cswap<<< grid, threads, 0, magma_stream >>>(n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
magmaFloatComplex *shared_y = shared_data;
int tid = blockIdx.x * cgeru_bs + threadIdx.x;
magmaFloatComplex reg = MAGMA_C_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda)
{
/*
Specialized kernel that merges the two kernels cscal and cgeru:
1) scale the first column vector A(1:M-1,0) by 1/A(0,0);
2) perform a cgeru operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
*/
dim3 threads(cgeru_bs, 1, 1);
int num_blocks = (m - 1)/cgeru_bs + 1;
dim3 grid(num_blocks,1);
size_t shared_size = sizeof(magmaFloatComplex)*(n);
kernel_cscal_cgeru<<< grid, threads, shared_size, magma_stream>>>(m, n, A, lda);
}
|
089b0f63394df55fedcc50e7b9b02051d0e1c5a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "../catch.hpp"
#define TEST_CUDA_CHECK_RETURN
//--------------------------------------------------------------
#include "../../GPUPatternMining/HashMap/gpuhashmapper.h"
#include "../BaseCudaTestHandler.h"
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "InsertTest", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
hipMalloc((void**)&c_keys, (sizeof(unsigned int) * 3));
hipMalloc((void**)&c_values, (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 1, 2, 3 };
unsigned int h_values[] = { 10, 100, 1000 };
hipMemcpy(c_keys, h_keys, threeUINTsize, hipMemcpyHostToDevice);
hipMemcpy(c_values, h_values, threeUINTsize, hipMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
hipFree(c_keys);
hipFree(c_values);
REQUIRE(true);
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Insert and Read test", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
hipMalloc((void**)&c_keys, (sizeof(unsigned int) * 3));
hipMalloc((void**)&c_values, (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 1, 2, 3 };
unsigned int h_values[] = { 10, 100, 1000 };
hipMemcpy(c_keys, h_keys, threeUINTsize, hipMemcpyHostToDevice);
hipMemcpy(c_values, h_values, threeUINTsize, hipMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
unsigned int* c_resultValues;
hipMalloc((void**)&c_resultValues, (sizeof(unsigned int) * 3));
unsigned int h_resultValues[] = { 0, 0, 0 };
mapper.getValues(c_keys, c_resultValues, 3);
hipMemcpy(h_resultValues, c_resultValues, threeUINTsize, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
REQUIRE(h_resultValues[0] == h_values[0]);
REQUIRE(h_resultValues[1] == h_values[1]);
REQUIRE(h_resultValues[2] == h_values[2]);
hipFree(c_keys);
hipFree(c_values);
hipFree(c_resultValues);
REQUIRE(true);
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Insert and Read test with HEX key", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
hipMalloc(reinterpret_cast<void**>(&c_keys), (sizeof(unsigned int) * 3));
hipMalloc(reinterpret_cast<void**>(&c_values), (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 0xAA, 0xAB, 0xFF };
unsigned int h_values[] = { 10, 100, 1000 };
hipMemcpy(c_keys, h_keys, threeUINTsize, hipMemcpyHostToDevice);
hipMemcpy(c_values, h_values, threeUINTsize, hipMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
unsigned int* c_resultValues;
hipMalloc(reinterpret_cast<void**>(&c_resultValues), (sizeof(unsigned int) * 3));
unsigned int h_resultValues[] = { 0, 0, 0 };
mapper.getValues(c_keys, c_resultValues, 3);
hipMemcpy(h_resultValues, c_resultValues, threeUINTsize, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
REQUIRE(h_resultValues[0] == h_values[0]);
REQUIRE(h_resultValues[1] == h_values[1]);
REQUIRE(h_resultValues[2] == h_values[2]);
hipFree(c_keys);
hipFree(c_values);
hipFree(c_resultValues);
REQUIRE(true);
} | 089b0f63394df55fedcc50e7b9b02051d0e1c5a4.cu | #include "../catch.hpp"
#define TEST_CUDA_CHECK_RETURN
//--------------------------------------------------------------
#include "../../GPUPatternMining/HashMap/gpuhashmapper.h"
#include "../BaseCudaTestHandler.h"
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "InsertTest", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
cudaMalloc((void**)&c_keys, (sizeof(unsigned int) * 3));
cudaMalloc((void**)&c_values, (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 1, 2, 3 };
unsigned int h_values[] = { 10, 100, 1000 };
cudaMemcpy(c_keys, h_keys, threeUINTsize, cudaMemcpyHostToDevice);
cudaMemcpy(c_values, h_values, threeUINTsize, cudaMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
cudaFree(c_keys);
cudaFree(c_values);
REQUIRE(true);
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Insert and Read test", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
cudaMalloc((void**)&c_keys, (sizeof(unsigned int) * 3));
cudaMalloc((void**)&c_values, (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 1, 2, 3 };
unsigned int h_values[] = { 10, 100, 1000 };
cudaMemcpy(c_keys, h_keys, threeUINTsize, cudaMemcpyHostToDevice);
cudaMemcpy(c_values, h_values, threeUINTsize, cudaMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
unsigned int* c_resultValues;
cudaMalloc((void**)&c_resultValues, (sizeof(unsigned int) * 3));
unsigned int h_resultValues[] = { 0, 0, 0 };
mapper.getValues(c_keys, c_resultValues, 3);
cudaMemcpy(h_resultValues, c_resultValues, threeUINTsize, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
REQUIRE(h_resultValues[0] == h_values[0]);
REQUIRE(h_resultValues[1] == h_values[1]);
REQUIRE(h_resultValues[2] == h_values[2]);
cudaFree(c_keys);
cudaFree(c_values);
cudaFree(c_resultValues);
REQUIRE(true);
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Insert and Read test with HEX key", "HashMap")
{
constexpr size_t threeUINTsize = sizeof(unsigned int) * 3;
GPUUIntKeyProcessor *intKeyProcessor = new GPUUIntKeyProcessor();
unsigned int hashSize = 4;
GPUHashMapper<unsigned int, unsigned int, GPUUIntKeyProcessor> mapper(hashSize, intKeyProcessor);
unsigned int* c_keys;
unsigned int* c_values;
cudaMalloc(reinterpret_cast<void**>(&c_keys), (sizeof(unsigned int) * 3));
cudaMalloc(reinterpret_cast<void**>(&c_values), (sizeof(unsigned int) * 3));
unsigned int h_keys[] = { 0xAA, 0xAB, 0xFF };
unsigned int h_values[] = { 10, 100, 1000 };
cudaMemcpy(c_keys, h_keys, threeUINTsize, cudaMemcpyHostToDevice);
cudaMemcpy(c_values, h_values, threeUINTsize, cudaMemcpyHostToDevice);
mapper.insertKeyValuePairs(c_keys, c_values, 3);
unsigned int* c_resultValues;
cudaMalloc(reinterpret_cast<void**>(&c_resultValues), (sizeof(unsigned int) * 3));
unsigned int h_resultValues[] = { 0, 0, 0 };
mapper.getValues(c_keys, c_resultValues, 3);
cudaMemcpy(h_resultValues, c_resultValues, threeUINTsize, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
REQUIRE(h_resultValues[0] == h_values[0]);
REQUIRE(h_resultValues[1] == h_values[1]);
REQUIRE(h_resultValues[2] == h_values[2]);
cudaFree(c_keys);
cudaFree(c_values);
cudaFree(c_resultValues);
REQUIRE(true);
} |
456c8539feae5b5d2e746bdc97576b2b314ad90d.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by qinbin on 21/8/18.
//
#include "thundergbm/csc2r_transform.h"
void Csc2r::from_csr(float_type* csr_val, int* csr_col_ind, int* csr_row_ptr, int n_instances, int n_column, int nnz){
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
//std::cout<<"nnz:"<<nnz<<std::endl;
csc_val.resize(nnz);
csc_row_ind.resize(nnz);
csc_col_ptr.resize(n_column + 1);
this->nnz = nnz;
hipsparseScsr2csc(handle, n_instances, n_column, nnz, csr_val, csr_row_ptr,
csr_col_ind, csc_val.device_data(), csc_row_ind.device_data(), csc_col_ptr.device_data(),
HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO);
hipDeviceSynchronize();
hipsparseDestroy(handle);
hipsparseDestroyMatDescr(descr);
}
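// In get_cut_points_evenly below, bin_id = (int)((val - min_fea) / (max_fea - min_fea) * nBin) + 1;
// for example, with min_fea = 0, max_fea = 10 and nBin = 5, a value of 7.2 maps to
// (int)(7.2 / 10 * 5) + 1 = 4. When max_fea and min_fea differ by less than 1e-5 the
// bin id is set to the constant 2.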
void Csc2r::get_cut_points_evenly(int nBin, vector<float>& bin_id,
const vector<float>& min_fea, const vector<float>& max_fea) {
float* csc_val_host = csc_val.host_data();
int* csc_row_host = csc_row_ind.host_data();
int* csc_col_host = csc_col_ptr.host_data();
for(int cid = 0; cid < csc_col_ptr.size() - 1; cid ++){
int cstart = csc_col_host[cid];
int cend = csc_col_host[cid + 1];
for(int off = cstart; off < cend; off++){
float val = csc_val_host[off];
int rid = csc_row_host[off];
// std::cout<<"rid:"<<rid<<" ";
// std::cout<<"diff fea:"<<(max_fea[rid] - min_fea[rid])<<" ";
if((max_fea[rid] - min_fea[rid]) < 1e-5) {
// std::cout << "only one feature value" << std::endl;
bin_id[off] = 2.0;
}
// if(min_fea[rid] == INFINITY || max_fea[rid] == -INFINITY){
// std::cout<<"impossible case"<<std::endl;
// bin_id[off]=0.0;
// }
else
bin_id[off] = 1.0 * ((int) ((val - min_fea[rid]) / (max_fea[rid] - min_fea[rid]) * nBin) + 1);
}
}
}
| 456c8539feae5b5d2e746bdc97576b2b314ad90d.cu | //
// Created by qinbin on 21/8/18.
//
#include "thundergbm/csc2r_transform.h"
void Csc2r::from_csr(float_type* csr_val, int* csr_col_ind, int* csr_row_ptr, int n_instances, int n_column, int nnz){
cusparseHandle_t handle;
cusparseMatDescr_t descr;
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
//std::cout<<"nnz:"<<nnz<<std::endl;
csc_val.resize(nnz);
csc_row_ind.resize(nnz);
csc_col_ptr.resize(n_column + 1);
this->nnz = nnz;
cusparseScsr2csc(handle, n_instances, n_column, nnz, csr_val, csr_row_ptr,
csr_col_ind, csc_val.device_data(), csc_row_ind.device_data(), csc_col_ptr.device_data(),
CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO);
cudaDeviceSynchronize();
cusparseDestroy(handle);
cusparseDestroyMatDescr(descr);
}
void Csc2r::get_cut_points_evenly(int nBin, vector<float>& bin_id,
const vector<float>& min_fea, const vector<float>& max_fea) {
float* csc_val_host = csc_val.host_data();
int* csc_row_host = csc_row_ind.host_data();
int* csc_col_host = csc_col_ptr.host_data();
for(int cid = 0; cid < csc_col_ptr.size() - 1; cid ++){
int cstart = csc_col_host[cid];
int cend = csc_col_host[cid + 1];
for(int off = cstart; off < cend; off++){
float val = csc_val_host[off];
int rid = csc_row_host[off];
// std::cout<<"rid:"<<rid<<" ";
// std::cout<<"diff fea:"<<(max_fea[rid] - min_fea[rid])<<" ";
if((max_fea[rid] - min_fea[rid]) < 1e-5) {
// std::cout << "only one feature value" << std::endl;
bin_id[off] = 2.0;
}
// if(min_fea[rid] == INFINITY || max_fea[rid] == -INFINITY){
// std::cout<<"impossible case"<<std::endl;
// bin_id[off]=0.0;
// }
else
bin_id[off] = 1.0 * ((int) ((val - min_fea[rid]) / (max_fea[rid] - min_fea[rid]) * nBin) + 1);
}
}
}
|
386e231fcd871ab5c8e05d4b3b6f44c585b1bb6b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define THREAD_PER_BLOCK 1024
unsigned char *host_s = NULL; // source image array
unsigned char *host_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned int width, height; // image width, image height
unsigned int rgb_raw_data_offset; // RGB raw data offset
unsigned char bit_per_pixel; // bit per pixel
unsigned short byte_per_pixel; // byte per pixel
// bitmap header
unsigned char header[54] = {
0x42, // identity : B
0x4d, // identity : M
0, 0, 0, 0, // file size
0, 0, // reserved1
0, 0, // reserved2
54, 0, 0, 0, // RGB data offset
40, 0, 0, 0, // struct BITMAPINFOHEADER size
0, 0, 0, 0, // bmp width
0, 0, 0, 0, // bmp height
1, 0, // planes
24, 0, // bit per pixel
0, 0, 0, 0, // compression
0, 0, 0, 0, // data size
0, 0, 0, 0, // h resolution
0, 0, 0, 0, // v resolution
0, 0, 0, 0, // used colors
0, 0, 0, 0 // important colors
};
// sobel mask (5x5 version)
// Task 2: Put mask[][][] into Shared Memory
__constant__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
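// The two 5x5 masks are Sobel-like derivative filters along the two image axes; for each
// pixel the kernel combines their responses as sqrt(Gx*Gx + Gy*Gy) / SCALE and clamps the
// result to 255 per colour channel.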
int read_bmp (const char *fname_s) {
fp_s = fopen(fname_s, "rb");
if (fp_s == NULL) {
printf("fopen fp_s error\n");
return -1;
}
// move offset to 10 to find rgb raw data offset
fseek(fp_s, 10, SEEK_SET);
fread(&rgb_raw_data_offset, sizeof(unsigned int), 1, fp_s);
// move offset to 18 to get width & height;
fseek(fp_s, 18, SEEK_SET);
fread(&width, sizeof(unsigned int), 1, fp_s);
fread(&height, sizeof(unsigned int), 1, fp_s);
// get bit per pixel
fseek(fp_s, 28, SEEK_SET);
fread(&bit_per_pixel, sizeof(unsigned short), 1, fp_s);
byte_per_pixel = bit_per_pixel / 8;
// move offset to rgb_raw_data_offset to get RGB raw data
fseek(fp_s, rgb_raw_data_offset, SEEK_SET);
// Task 3: Assign host_s to Pinnned Memory
// Hint : err = hipHostMalloc ( ... )
// if (err != hipSuccess)
host_s = (unsigned char *) malloc((size_t)width * height * byte_per_pixel);
if (host_s == NULL) {
printf("malloc images_s error\n");
return -1;
}
// Task 3: Assign host_t to Pinned Memory
// Hint : err = hipHostMalloc ( ... )
// if (err != hipSuccess)
host_t = (unsigned char *) malloc((size_t) width * height * byte_per_pixel);
if (host_t == NULL) {
printf("malloc host_t error\n");
return -1;
}
fread(host_s, sizeof(unsigned char), (size_t)(long) width * height * byte_per_pixel, fp_s);
return 0;
}
// void sobel () {
// int x, y, i, v, u; // for loop counter
// int R, G, B; // color of R, G, B
// double val[MASK_N*3] = {0.0};
// int adjustX, adjustY, xBound, yBound;
// // Task 2: Put mask[][][] into Shared Memory
// // Hint : Please declare it in kernel function
// // Then use some threads to move data from global memory to shared memory
// // Remember to __syncthreads() after it's done <WHY?>
// // Task 1: Relabel x or y or both into combination of blockIdx, threadIdx ... etc
// // Hint A: We do not have enough threads for each pixels in the image, so what should we do?
// // Hint B: Maybe you can map each y to different threads in different blocks
// for (y = 0; y < height; ++y) {
// for (x = 0; x < width; ++x) {
// for (i = 0; i < MASK_N; ++i) {
// adjustX = (MASK_X % 2) ? 1 : 0;
// adjustY = (MASK_Y % 2) ? 1 : 0;
// xBound = MASK_X /2;
// yBound = MASK_Y /2;
// val[i*3+2] = 0.0;
// val[i*3+1] = 0.0;
// val[i*3] = 0.0;
// for (v = -yBound; v < yBound + adjustY; ++v) {
// for (u = -xBound; u < xBound + adjustX; ++u) {
// if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
// R = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
// G = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
// B = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
// val[i*3+2] += R * mask[i][u + xBound][v + yBound];
// val[i*3+1] += G * mask[i][u + xBound][v + yBound];
// val[i*3+0] += B * mask[i][u + xBound][v + yBound];
// }
// }
// }
// }
// double totalR = 0.0;
// double totalG = 0.0;
// double totalB = 0.0;
// for (i = 0; i < MASK_N; ++i) {
// totalR += val[i * 3 + 2] * val[i * 3 + 2];
// totalG += val[i * 3 + 1] * val[i * 3 + 1];
// totalB += val[i * 3 + 0] * val[i * 3 + 0];
// }
// totalR = sqrt(totalR) / SCALE;
// totalG = sqrt(totalG) / SCALE;
// totalB = sqrt(totalB) / SCALE;
// const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
// const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
// const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
// host_t[byte_per_pixel * (width * y + x) + 2] = cR;
// host_t[byte_per_pixel * (width * y + x) + 1] = cG;
// host_t[byte_per_pixel * (width * y + x) + 0] = cB;
// }
// }
// }
__global__ void gpu_sobel (unsigned char* device_s, unsigned char* device_t, int width, int height, short byte_per_pixel) {
int x, y, i, v, u; // for loop counter
int R, G, B; // color of R, G, B
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
// Task 2: Put mask[][][] into Shared Memory
// Hint : Please declare it in kernel function
// Then use some threads to move data from global memory to shared memory
// Remember to __syncthreads() after it's done <WHY?>
int _mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
if(threadIdx.x<MASK_X*MASK_Y){
mask[0][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y]=_mask[0][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y];
mask[1][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y]=_mask[1][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y];
}
__syncthreads();
// Task 1: Relabel x or y or both into combination of blockIdx, threadIdx ... etc
// Hint A: We do not have enough threads for each pixels in the image, so what should we do?
// Hint B: Maybe you can map each y to different threads in different blocks
y=blockIdx.x*THREAD_PER_BLOCK+threadIdx.x;
if(y<height)
for (x = 0; x < width; ++x) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
G = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
B = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
device_t[byte_per_pixel * (width * y + x) + 2] = cR;
device_t[byte_per_pixel * (width * y + x) + 1] = cG;
device_t[byte_per_pixel * (width * y + x) + 0] = cB;
}
}
int write_bmp (const char *fname_t) {
unsigned int file_size;
fp_t = fopen(fname_t, "wb");
if (fp_t == NULL) {
printf("fopen fname_t error\n");
return -1;
}
// file size
file_size = width * height * byte_per_pixel + rgb_raw_data_offset;
header[2] = (unsigned char)(file_size & 0x000000ff);
header[3] = (file_size >> 8) & 0x000000ff;
header[4] = (file_size >> 16) & 0x000000ff;
header[5] = (file_size >> 24) & 0x000000ff;
// width
header[18] = width & 0x000000ff;
header[19] = (width >> 8) & 0x000000ff;
header[20] = (width >> 16) & 0x000000ff;
header[21] = (width >> 24) & 0x000000ff;
// height
header[22] = height &0x000000ff;
header[23] = (height >> 8) & 0x000000ff;
header[24] = (height >> 16) & 0x000000ff;
header[25] = (height >> 24) & 0x000000ff;
// bit per pixel
header[28] = bit_per_pixel;
// write header
fwrite(header, sizeof(unsigned char), rgb_raw_data_offset, fp_t);
// write image
fwrite(host_t, sizeof(unsigned char), (size_t)(long)width * height * byte_per_pixel, fp_t);
fclose(fp_s);
fclose(fp_t);
return 0;
}
int main(int argc, char **argv) {
assert(argc == 3);
const char* input = argv[1];
const char* output = argv[2];
read_bmp(input); // 24 bit gray level image
// Task 1: Allocate memory on GPU
// Hint : hipMalloc ()
// What do we need to store on GPU? (input image, output image, ...)
unsigned char* ptr_s = 0;
unsigned char* ptr_d = 0;
// int* ptr_mask;
hipMalloc(&ptr_s,(size_t)width*height*byte_per_pixel);
hipMalloc(&ptr_d,(size_t)width*height*byte_per_pixel);
// hipMalloc(&ptr_mask,sizeof(int)*MASK_N*MASK_X*MASK_Y);
// Task 1: Memory copy from Host to Device (GPU)
// Hint : hipMemcpy ( ... , hipMemcpyHostToDevice )
hipMemcpy(ptr_s,host_s,width*height*byte_per_pixel,hipMemcpyHostToDevice);
// hipMemcpy(ptr_mask,mask,sizeof(int)*MASK_N*MASK_X*MASK_Y,hipMemcpyHostToDevice);
// Task 1: Modify sobel() to CUDA kernel function
// Hint : sobel_Kernel <<< ??? , ??? >>> ( ??? );
// sobel();
hipLaunchKernelGGL(( gpu_sobel), dim3((height/THREAD_PER_BLOCK)+1),dim3(THREAD_PER_BLOCK),MASK_N*MASK_X*MASK_Y, 0, ptr_s, ptr_d, width, height, byte_per_pixel);
// Task 1: Memory Copy from Device (GPU) to Host
// Hint : hipMemcpy ( ... , hipMemcpyDeviceToHost )
hipMemcpy(host_t,ptr_d,width*height*byte_per_pixel,hipMemcpyDeviceToHost);
// Task 1: Free memory on device
// Hint : hipFree ( ... )
hipFree(ptr_s);
hipFree(ptr_d);
write_bmp(output);
// Task 3: Free Pinned memory
// Hint : replace free ( ... ) by hipHostFree ( ... )
free (host_s);
free (host_t);
}
| 386e231fcd871ab5c8e05d4b3b6f44c585b1bb6b.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <cuda_runtime.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define THREAD_PER_BLOCK 1024
unsigned char *host_s = NULL; // source image array
unsigned char *host_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned int width, height; // image width, image height
unsigned int rgb_raw_data_offset; // RGB raw data offset
unsigned char bit_per_pixel; // bit per pixel
unsigned short byte_per_pixel; // byte per pixel
// bitmap header
unsigned char header[54] = {
0x42, // identity : B
0x4d, // identity : M
0, 0, 0, 0, // file size
0, 0, // reserved1
0, 0, // reserved2
54, 0, 0, 0, // RGB data offset
40, 0, 0, 0, // struct BITMAPINFOHEADER size
0, 0, 0, 0, // bmp width
0, 0, 0, 0, // bmp height
1, 0, // planes
24, 0, // bit per pixel
0, 0, 0, 0, // compression
0, 0, 0, 0, // data size
0, 0, 0, 0, // h resolution
0, 0, 0, 0, // v resolution
0, 0, 0, 0, // used colors
0, 0, 0, 0 // important colors
};
// sobel mask (5x5 version)
// Task 2: Put mask[][][] into Shared Memory
__constant__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_bmp (const char *fname_s) {
fp_s = fopen(fname_s, "rb");
if (fp_s == NULL) {
printf("fopen fp_s error\n");
return -1;
}
// move offset to 10 to find rgb raw data offset
fseek(fp_s, 10, SEEK_SET);
fread(&rgb_raw_data_offset, sizeof(unsigned int), 1, fp_s);
// move offset to 18 to get width & height;
fseek(fp_s, 18, SEEK_SET);
fread(&width, sizeof(unsigned int), 1, fp_s);
fread(&height, sizeof(unsigned int), 1, fp_s);
// get bit per pixel
fseek(fp_s, 28, SEEK_SET);
fread(&bit_per_pixel, sizeof(unsigned short), 1, fp_s);
byte_per_pixel = bit_per_pixel / 8;
// move offset to rgb_raw_data_offset to get RGB raw data
fseek(fp_s, rgb_raw_data_offset, SEEK_SET);
// Task 3: Assign host_s to Pinnned Memory
// Hint : err = cudaMallocHost ( ... )
// if (err != CUDA_SUCCESS)
host_s = (unsigned char *) malloc((size_t)width * height * byte_per_pixel);
if (host_s == NULL) {
printf("malloc images_s error\n");
return -1;
}
// Task 3: Assign host_t to Pinned Memory
// Hint : err = cudaMallocHost ( ... )
// if (err != CUDA_SUCCESS)
host_t = (unsigned char *) malloc((size_t) width * height * byte_per_pixel);
if (host_t == NULL) {
printf("malloc host_t error\n");
return -1;
}
fread(host_s, sizeof(unsigned char), (size_t)(long) width * height * byte_per_pixel, fp_s);
return 0;
}
// void sobel () {
// int x, y, i, v, u; // for loop counter
// int R, G, B; // color of R, G, B
// double val[MASK_N*3] = {0.0};
// int adjustX, adjustY, xBound, yBound;
// // Task 2: Put mask[][][] into Shared Memory
// // Hint : Please declare it in kernel function
// // Then use some threads to move data from global memory to shared memory
// // Remember to __syncthreads() after it's done <WHY?>
// // Task 1: Relabel x or y or both into combination of blockIdx, threadIdx ... etc
// // Hint A: We do not have enough threads for each pixels in the image, so what should we do?
// // Hint B: Maybe you can map each y to different threads in different blocks
// for (y = 0; y < height; ++y) {
// for (x = 0; x < width; ++x) {
// for (i = 0; i < MASK_N; ++i) {
// adjustX = (MASK_X % 2) ? 1 : 0;
// adjustY = (MASK_Y % 2) ? 1 : 0;
// xBound = MASK_X /2;
// yBound = MASK_Y /2;
// val[i*3+2] = 0.0;
// val[i*3+1] = 0.0;
// val[i*3] = 0.0;
// for (v = -yBound; v < yBound + adjustY; ++v) {
// for (u = -xBound; u < xBound + adjustX; ++u) {
// if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
// R = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
// G = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
// B = host_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
// val[i*3+2] += R * mask[i][u + xBound][v + yBound];
// val[i*3+1] += G * mask[i][u + xBound][v + yBound];
// val[i*3+0] += B * mask[i][u + xBound][v + yBound];
// }
// }
// }
// }
// double totalR = 0.0;
// double totalG = 0.0;
// double totalB = 0.0;
// for (i = 0; i < MASK_N; ++i) {
// totalR += val[i * 3 + 2] * val[i * 3 + 2];
// totalG += val[i * 3 + 1] * val[i * 3 + 1];
// totalB += val[i * 3 + 0] * val[i * 3 + 0];
// }
// totalR = sqrt(totalR) / SCALE;
// totalG = sqrt(totalG) / SCALE;
// totalB = sqrt(totalB) / SCALE;
// const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
// const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
// const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
// host_t[byte_per_pixel * (width * y + x) + 2] = cR;
// host_t[byte_per_pixel * (width * y + x) + 1] = cG;
// host_t[byte_per_pixel * (width * y + x) + 0] = cB;
// }
// }
// }
__global__ void gpu_sobel (unsigned char* device_s, unsigned char* device_t, int width, int height, short byte_per_pixel) {
int x, y, i, v, u; // for loop counter
int R, G, B; // color of R, G, B
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
// Task 2: Put mask[][][] into Shared Memory
// Hint : Please declare it in kernel function
// Then use some threads to move data from global memory to shared memory
// Remember to __syncthreads() after it's done <WHY?>
int _mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
if(threadIdx.x<MASK_X*MASK_Y){
mask[0][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y]=_mask[0][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y];
mask[1][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y]=_mask[1][threadIdx.x/MASK_Y][threadIdx.x%MASK_Y];
}
__syncthreads();
// Task 1: Relabel x or y or both into combination of blockIdx, threadIdx ... etc
// Hint A: We do not have enough threads for each pixels in the image, so what should we do?
// Hint B: Maybe you can map each y to different threads in different blocks
y=blockIdx.x*THREAD_PER_BLOCK+threadIdx.x;
if(y<height)
for (x = 0; x < width; ++x) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
G = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
B = device_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
device_t[byte_per_pixel * (width * y + x) + 2] = cR;
device_t[byte_per_pixel * (width * y + x) + 1] = cG;
device_t[byte_per_pixel * (width * y + x) + 0] = cB;
}
}
int write_bmp (const char *fname_t) {
unsigned int file_size;
fp_t = fopen(fname_t, "wb");
if (fp_t == NULL) {
printf("fopen fname_t error\n");
return -1;
}
// file size
file_size = width * height * byte_per_pixel + rgb_raw_data_offset;
header[2] = (unsigned char)(file_size & 0x000000ff);
header[3] = (file_size >> 8) & 0x000000ff;
header[4] = (file_size >> 16) & 0x000000ff;
header[5] = (file_size >> 24) & 0x000000ff;
// width
header[18] = width & 0x000000ff;
header[19] = (width >> 8) & 0x000000ff;
header[20] = (width >> 16) & 0x000000ff;
header[21] = (width >> 24) & 0x000000ff;
// height
header[22] = height &0x000000ff;
header[23] = (height >> 8) & 0x000000ff;
header[24] = (height >> 16) & 0x000000ff;
header[25] = (height >> 24) & 0x000000ff;
// bit per pixel
header[28] = bit_per_pixel;
// write header
fwrite(header, sizeof(unsigned char), rgb_raw_data_offset, fp_t);
// write image
fwrite(host_t, sizeof(unsigned char), (size_t)(long)width * height * byte_per_pixel, fp_t);
fclose(fp_s);
fclose(fp_t);
return 0;
}
int main(int argc, char **argv) {
assert(argc == 3);
const char* input = argv[1];
const char* output = argv[2];
read_bmp(input); // 24 bit gray level image
// Task 1: Allocate memory on GPU
// Hint : cudaMalloc ()
// What do we need to store on GPU? (input image, output image, ...)
unsigned char* ptr_s = 0;
unsigned char* ptr_d = 0;
// int* ptr_mask;
cudaMalloc(&ptr_s,(size_t)width*height*byte_per_pixel);
cudaMalloc(&ptr_d,(size_t)width*height*byte_per_pixel);
// cudaMalloc(&ptr_mask,sizeof(int)*MASK_N*MASK_X*MASK_Y);
// Task 1: Memory copy from Host to Device (GPU)
// Hint : cudaMemcpy ( ... , cudaMemcpyHostToDevice )
cudaMemcpy(ptr_s,host_s,width*height*byte_per_pixel,cudaMemcpyHostToDevice);
// cudaMemcpy(ptr_mask,mask,sizeof(int)*MASK_N*MASK_X*MASK_Y,cudaMemcpyHostToDevice);
// Task 1: Modify sobel() to CUDA kernel function
// Hint : sobel_Kernel <<< ??? , ??? >>> ( ??? );
// sobel();
gpu_sobel<<<(height/THREAD_PER_BLOCK)+1,THREAD_PER_BLOCK,MASK_N*MASK_X*MASK_Y>>> (ptr_s, ptr_d, width, height, byte_per_pixel);
// Task 1: Memory Copy from Device (GPU) to Host
// Hint : cudaMemcpy ( ... , cudaMemcpyDeviceToHost )
cudaMemcpy(host_t,ptr_d,width*height*byte_per_pixel,cudaMemcpyDeviceToHost);
// Task 1: Free memory on device
// Hint : cudaFree ( ... )
cudaFree(ptr_s);
cudaFree(ptr_d);
write_bmp(output);
// Task 3: Free Pinned memory
// Hint : replace free ( ... ) by cudaFreeHost ( ... )
free (host_s);
free (host_t);
}
|
b70779c658c99a2dd0ebeb3807bf66e3d84140fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void initSomme_g() {
somme_g = 0;
for (int i = 1; i < nbMachines_g; i++)
somme_g += i;
}
void free_memories() {
for (int i = 0; i < nbMachines_g; i++)
free(tempsFinMachines_g[i]);
free(tempsFinMachines_g);
for (int i = 0; i < somme_g; i++)
free(tempsEntre_g[i]);
free(tempsEntre_g);
free(tempsFin_g);
free(tempsExeTrie_g);
free(tempsEntreTrie_g);
free(tempsFinMachinesTrie_g);
free(tempsLag_g);
free(ordoSomme_g);
free(nbFois_g);
free(tempsFinTrie_g);
free(tabJohnson_g);
free(machine_g);
free(tempsArriver_g);
free(tempsDepart_g);
free(minTempsArr_g);
free(minTempsDep_g);
free(tempsJob_g);
}
void allouerMemoire_g() {
tempsFin_g = (int *) malloc(nbJob_g * sizeof(int));
tempsExeTrie_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
tempsFinMachines_g = (int **) malloc(nbMachines_g * sizeof(int *));
for (int i = 0; i < nbMachines_g; i++)
tempsFinMachines_g[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsEntre_g = (int **) malloc(somme_g * sizeof(int *));
for (int i = 0; i < somme_g; i++)
tempsEntre_g[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsEntreTrie_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
tempsFinMachinesTrie_g = (int *) malloc(
nbMachines_g * nbJob_g * sizeof(int));
tempsLag_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
ordoSomme_g = (int *) malloc(somme_g * sizeof(int));
nbFois_g = (int *) malloc(somme_g * sizeof(int));
tempsFinTrie_g = (int *) malloc(nbJob_g * sizeof(int));
tabJohnson_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
machine_g = (int *) malloc(2 * somme_g * sizeof(int));
tempsArriver_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
tempsDepart_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
minTempsArr_g = (int *) malloc(nbMachines_g * sizeof(int));
minTempsDep_g = (int *) malloc(nbMachines_g * sizeof(int));
}
long double aFaire_g(int profondeur) {
long double nbNoeudAFaire = 1;
for (int i = 2; i <= profondeur; i++)
nbNoeudAFaire = (nbNoeudAFaire * i) + i;
return nbNoeudAFaire;
}
void initTempsExeTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsExeTrie_g[i * nbJob_g + j] = j;
}
void initTempsEntreTrie_g() {
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_g[i * nbJob_g + j] = j;
}
void initTempArrays_g() {
tempsExeTrie_T = (int **) malloc(nbMachines_g * sizeof(int *));
tempsFinMachinesTrie_T = (int **) malloc(nbMachines_g * sizeof(int *));
for (int i = 0; i < nbMachines_g; i++) {
tempsExeTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsFinMachinesTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
}
tempsEntreTrie_T = (int **) malloc(somme_g * sizeof(int *));
tabJohnson_T = (int **) malloc(somme_g * sizeof(int *));
for (int i = 0; i < somme_g; i++) {
tempsEntreTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
tabJohnson_T[i] = (int *) malloc(nbJob_g * sizeof(int));
}
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_T[i][j] = j;
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++) {
tempsExeTrie_T[i][j] = j;
tempsFinMachinesTrie_T[i][j] = j;
}
}
void initTempsFinTrie_g() {
for (int i = 0; i < nbJob_g; i++)
tempsFinTrie_g[i] = i;
}
void initTempsFinMachinesTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachinesTrie_g[i * nbJob_g + j] = j;
}
int estInf_g(int i, int j) {
if (pluspetit_g[0][i] == pluspetit_g[0][j]) {
if (pluspetit_g[0][i] == 1)
return pluspetit_g[1][i] < pluspetit_g[1][j];
return pluspetit_g[1][i] > pluspetit_g[1][j];
}
return pluspetit_g[0][i] < pluspetit_g[0][j];
}
int estSup_g(int i, int j) {
if (pluspetit_g[0][i] == pluspetit_g[0][j]) {
if (pluspetit_g[0][i] == 1)
return pluspetit_g[1][i] > pluspetit_g[1][j];
return pluspetit_g[1][i] < pluspetit_g[1][j];
}
return pluspetit_g[0][i] > pluspetit_g[0][j];
}
int partionner_g(int *ordo, int deb, int fin) {
int d = deb - 1;
int f = fin + 1;
int mem, pivot = ordo[deb];
do {
do {
f--;
} while (estSup_g(ordo[f], pivot));
do {
d++;
} while (estInf_g(ordo[d], pivot));
if (d < f) {
mem = ordo[d];
ordo[d] = ordo[f];
ordo[f] = mem;
}
} while (d < f);
return f;
}
void quicksort_g(int *ordo, int deb, int fin) {
int k;
if ((fin - deb) > 0) {
k = partionner_g(ordo, deb, fin);
quicksort_g(ordo, deb, k);
quicksort_g(ordo, k + 1, fin);
}
}
void trierTableau_g(int *ordo, int nbElem_g, int *nbFois_g, bool croissant) {
pluspetit_g[0] = (int *) malloc((nbElem_g) * sizeof(int));
pluspetit_g[1] = (int *) malloc((nbElem_g) * sizeof(int));
for (int i = 0; i < nbElem_g; i++) {
if (croissant)
pluspetit_g[0][i] = 1;
else
pluspetit_g[0][i] = 2;
pluspetit_g[1][i] = nbFois_g[i];
}
quicksort_g(ordo, 0, (nbElem_g - 1));
free(pluspetit_g[0]);
free(pluspetit_g[1]);
}
void remplirTempsExeTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
trierTableau_g(tempsExeTrie_T[i], nbJob_g, tempsJob_T[i], true);
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsExeTrie_g[i * nbJob_g + j] = tempsExeTrie_T[i][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsExeTrie_T[i]);
free(tempsExeTrie_T);
}
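// For job i and machine k, tempsArriver_g[k * nbJob_g + i] is the total processing time of
// job i on machines 0..k-1 and tempsDepart_g[k * nbJob_g + i] the total processing time on
// the machines after k; minTempsArr_g and minTempsDep_g hold the minima of these values
// over all jobs.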
void remplirTempsArriverDepart_g() {
bool faitDep[nbMachines_g];
bool faitArr[nbMachines_g];
int machine_g;
for (int k = 1; k < nbMachines_g; k++) {
faitArr[k] = false;
faitDep[k] = false;
}
minTempsDep_g[nbMachines_g - 1] = 0;
minTempsArr_g[0] = 0;
for (int i = 0; i < nbJob_g; i++) {
tempsArriver_g[i] = 0;
tempsDepart_g[(nbMachines_g - 1) * nbJob_g + i] = 0;
for (int k = 1; k < nbMachines_g; k++) {
machine_g = nbMachines_g - k - 1;
tempsArriver_g[k * nbJob_g + i] = tempsArriver_g[(k - 1) * nbJob_g
+ i] + tempsJob_g[(k - 1) * nbJob_g + i];
tempsDepart_g[machine_g * nbJob_g + i] = tempsDepart_g[(machine_g
+ 1) * nbJob_g + i]
+ tempsJob_g[(machine_g + 1) * nbJob_g + i];
if (!faitArr[k]
|| minTempsArr_g[k] > tempsArriver_g[k * nbJob_g + i]) {
faitArr[k] = true;
minTempsArr_g[k] = tempsArriver_g[k * nbJob_g + i];
}
if (!faitDep[k]
|| minTempsDep_g[machine_g]
> tempsDepart_g[machine_g * nbJob_g + i]) {
faitDep[k] = true;
minTempsDep_g[machine_g] =
tempsDepart_g[machine_g * nbJob_g + i];
}
}
}
}
void remplirMachine_g() {
int cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
machine_g[cmpt] = i;
cmpt++;
}
int taille = cmpt;
cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
machine_g[1 * taille + cmpt] = j;
cmpt++;
}
}
void remplirLag_g() {
int m1, m2;
for (int i = 0; i < somme_g; i++) {
m1 = machine_g[0 * somme_g + i];
m2 = machine_g[1 * somme_g + i];
for (int j = 0; j < nbJob_g; j++) {
tempsLag_g[i * nbJob_g + j] = 0;
for (int k = m1 + 1; k < m2; k++)
tempsLag_g[i * nbJob_g + j] += tempsJob_g[k * nbJob_g + j];
tempsEntre_g[i][j] = tempsLag_g[i * nbJob_g + j]
+ tempsJob_g[m2 * nbJob_g + j];
}
}
}
void remplirTempsEntreTrie_g() {
for (int i = 0; i < somme_g; i++)
trierTableau_g(tempsEntreTrie_T[i], nbJob_g, tempsEntre_g[i], true);
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_g[i * nbJob_g + j] = tempsEntreTrie_T[i][j];
for (int i = 0; i < somme_g; i++)
free(tempsEntreTrie_T[i]);
free(tempsEntreTrie_T);
}
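// Johnson_g orders the jobs by Johnson's rule for the two-machine sub-problem (m1, m2)
// with time lags: jobs with p(m1) < p(m2) come first, sorted by increasing p(m1) + lag;
// the remaining jobs follow, sorted by decreasing p(m2) + lag (see estInf_g / estSup_g).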
void Johnson_g(int *ordo, int m1, int m2, int s) {
pluspetit_g[0] = (int *) malloc(nbJob_g * sizeof(int));
pluspetit_g[1] = (int *) malloc(nbJob_g * sizeof(int));
for (int i = 0; i < nbJob_g; i++) {
ordo[i] = i;
if (tempsJob_g[m1 * nbJob_g + i] < tempsJob_g[m2 * nbJob_g + i]) {
pluspetit_g[0][i] = 1;
pluspetit_g[1][i] = tempsJob_g[m1 * nbJob_g + i]
+ tempsLag_g[s * nbJob_g + i];
} else {
pluspetit_g[0][i] = 2;
pluspetit_g[1][i] = tempsJob_g[m2 * nbJob_g + i]
+ tempsLag_g[s * nbJob_g + i];
}
}
quicksort_g(ordo, 0, (nbJob_g - 1));
free(pluspetit_g[0]);
free(pluspetit_g[1]);
}
void remplirTabJohnson_g() {
int cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
Johnson_g(tabJohnson_T[cmpt], i, j, cmpt);
cmpt++;
}
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tabJohnson_g[i * nbJob_g + j] = tabJohnson_T[i][j];
for (int i = 0; i < somme_g; i++)
free(tabJohnson_T[i]);
free(tabJohnson_T);
}
void remplirTempsFinMachines_g() {
for (int i = 0; i < nbJob_g; i++)
tempsFinMachines_g[nbMachines_g - 1][i] = tempsFin_g[i];
for (int i = nbMachines_g - 2; i >= 0; i--)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachines_g[i][j] = tempsFinMachines_g[i + 1][j]
- tempsJob_T[i + 1][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsJob_T[i]);
free(tempsJob_T);
}
void remplirTempsFinMachinesTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
trierTableau_g(tempsFinMachinesTrie_T[i], nbJob_g,
tempsFinMachines_g[i], true);
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachinesTrie_g[i * nbJob_g + j] =
tempsFinMachinesTrie_T[i][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsFinMachinesTrie_T[i]);
free(tempsFinMachinesTrie_T);
}
void initNbFois_g() {
for (int i = 0; i < somme_g; i++) {
ordoSomme_g[i] = i;
nbFois_g[i] = 0;
}
}
void initialiserVar_g() {
initSomme_g();
allouerMemoire_g();
initTempArrays_g();
initTempsFinTrie_g();
initTempsExeTrie_g();
initTempsEntreTrie_g();
initTempsFinMachinesTrie_g();
trierTableau_g(tempsFinTrie_g, nbJob_g, tempsFin_g, true);
remplirTempsExeTrie_g();
remplirTempsArriverDepart_g();
remplirMachine_g();
remplirLag_g();
remplirTempsEntreTrie_g();
remplirTabJohnson_g();
remplirTempsFinMachines_g();
remplirTempsFinMachinesTrie_g();
initNbFois_g();
}
void init_bound() {
nbOrdo_g = 1;
nbborne_g = 1;
nbNoeud_g = 0;
nbPartition_g = 10;
nbRetardNonAffB1_g = 0;
nbRetardNonAffB2_g = 0;
nbZero_g = 0;
seuil_g = 0;
initialiserVar_g();
nbElem_g = somme_g;
nbNoeudTotal_g = aFaire_g(nbJob_g);
}
void init_problem_device_g(problem_d &p) {
p.limite1 = -1;
p.limite2 = nbJob_g;
for (int j = 0; j < nbJob_g; j++)
p.permutation[j] = j;
p.couts_somme = 0;
p.depth = 0;
}
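// Device-side helpers used by the bounding kernel. set_job_jobFin_g rebuilds, from a partial
// permutation, which jobs are fixed at the front (indices <= limite1), which are fixed at the
// back (indices >= limite2, also copied into jobFin), and which are still free (job[] == 0).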
inline __device__ void set_job_jobFin_g(int* job, int* jobFin, int* permutation, int limite1, int limite2,int nbJob_g)
{
int j = 0;
for(j = 0;j <= limite1;j++) job[permutation[j]] = j + 1;
for(j = limite1 + 1;j < limite2;j++) job[permutation[j]] = 0;
for(j = limite2; j < nbJob_g;j++)
{
job[permutation[j]] = j + 1;
jobFin[j] = permutation[j];
}
}
inline __device__ int cmaxFin_g(int *tempsMachinesFin, int *tmp, int *ma)
{
return max(tmp[1] + tempsMachinesFin[ma[1]],tmp[0] + tempsMachinesFin[ma[0]]);
}
inline __device__ void initCmax_g(int* tempsMachines,int nbAffectDebut, int *tmp, int *ma, int ind,int * machine_g, int somme_g, int * minTempsArr_g)
{
ma[0] = machine_g[ind];
ma[1] = machine_g[1 * somme_g + ind];
int coeff = __cosf(nbAffectDebut);
tmp[0] = (1 - coeff) * tempsMachines[ma[0]] + coeff * minTempsArr_g[ma[0]];
tmp[1] = (1 - coeff) * tempsMachines[ma[1]] + coeff * minTempsArr_g[ma[1]];
}
inline __device__ void heuristiqueCmax_g(int* job, int *tmp, int *ma, int ind, int nbJob_g, int * tabJohnson_g, unsigned char* tempsJob_g,int * tempsLag_g)
{
int jobCour;
for(int j= 0; j < nbJob_g; j++)
{
jobCour = tabJohnson_g[ind * nbJob_g + j];
if( job[jobCour] == 0 )
{
tmp[0] = tmp[0] + tempsJob_g[ma[0] * nbJob_g + jobCour];
tmp[1] = max (tmp[1], tmp[0] + tempsLag_g[ind * nbJob_g + jobCour]) + tempsJob_g[ma[1] * nbJob_g + jobCour];
}
}
}
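// criteres_calculer_g computes the exact makespan of a complete permutation with the standard
// flowshop recurrence C(m, j) = max(C(m-1, j), C(m, j-1)) + p(m, j), keeping one running
// completion time per machine in the temps[] array.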
inline __device__ int criteres_calculer_g(int* permutation,int nbMachines_g,int nbJob_g,unsigned char* tempsJob_g)
{
int temps[MAX_NB_MACHINES];
for(int mm = 0; mm < nbMachines_g; mm++) temps[mm] = 0;
for(int j = 0; j < nbJob_g;j++)
{
int job = permutation[j];
temps[0] = temps[0] + tempsJob_g[job];
for(int m = 1; m < nbMachines_g;m++)
temps[m] = max(temps[m],temps[m-1]) + tempsJob_g[m * nbJob_g + job];
}
return temps[nbMachines_g-1];
}
inline __device__ void set_tempsMachines_retardDebut_g(int *tempsMachines, int* permutation, int limite1,int nbMachines_g,int nbJob_g,unsigned char* tempsJob_g)
{
int m = 0;
for(m = 0; m < nbMachines_g; m++) tempsMachines[m] = 0;
for(int j = 0; j <= limite1; j++)
{
int job = permutation[j];
tempsMachines[0] = tempsMachines[0] + tempsJob_g[job];
for(m = 1; m < nbMachines_g;m++)
tempsMachines[m] = max(tempsMachines[m],tempsMachines[m-1]) + tempsJob_g[m * nbJob_g + job];
}
}
inline __device__ void set_tempsMachinesFin_tempsJobFin_g(int* jobFin, int * tempsMachinesFin,int nbAffectFin,int nbJob_g, int nbMachines_g,unsigned char* tempsJob_g)
{
int jobCour;
int tmpMa[MAX_NB_MACHINES];
for(int j = 0; j < nbMachines_g; j++)
{
for(int k = j; k < nbMachines_g; k++) tmpMa[k] = 0;
for(int k = nbJob_g - nbAffectFin; k < nbJob_g; k++)
{
jobCour = jobFin[k];
tmpMa[j] = tmpMa[j] + tempsJob_g[j * nbJob_g + jobCour];
for(int l = j + 1; l < nbMachines_g; l++)
{
tmpMa[l] = max (tmpMa[l-1],tmpMa[l]);
tmpMa[l] = tmpMa[l] + tempsJob_g[l * nbJob_g + jobCour];
}
}
tempsMachinesFin[j] = tmpMa[nbMachines_g-1];
}
}
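// borneInfMakespan_g computes a makespan lower bound for a partial schedule: for every machine
// pair (ma[0], ma[1]) it relaxes the problem to a two-machine flowshop with lags, sequences the
// still-free jobs in the precomputed Johnson order (tabJohnson_g), adds the head of the partial
// schedule (or the machine release minima when nothing is fixed at the front) and the tail of the
// jobs fixed at the end (or the minimum remaining-work tails), and keeps the largest bound found.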
inline __device__ int borneInfMakespan_g(int* job,int *tempsMachinesFin,int* tempsMachines,
int nbAffectDebut,int nbAffectFin,int *valBorneInf, int retardNonFin, int minCmax,int nbJob_g, int nbElem_g,
int leMeilleur_g, int nbborne_g, int somme_g, int * minTempsArr_g, int*nbFois_g, int *machine_g, int * tabJohnson_g,
unsigned char* tempsJob_g, int* tempsLag_g, int* ordoSomme_g, int* minTempsDep_g)
{
int moinsBon = 0;
int idxMoinsBonCmax, i, tmpDep, retard = retardNonFin;
int ma[2] = {0,0};
int tmp[2] = {0,0};
int res[2] = {0,0};
int l;
int coeffReturn=1;
for(l = 0; l < nbElem_g; l++)
{
i = ordoSomme_g[l];
initCmax_g(tempsMachines,nbAffectDebut,tmp,ma,i,machine_g,somme_g,minTempsArr_g);
heuristiqueCmax_g(job, tmp, ma, i, nbJob_g,tabJohnson_g, tempsJob_g, tempsLag_g);
if( nbAffectFin != 0 ) tmp[1] = cmaxFin_g(tempsMachinesFin, tmp, ma);
else tmp[1] = tmp[1] + minTempsDep_g[ma[1]];
float un = 1;
int coeff2 = min(un, __expf(tmp[1] - moinsBon - 1));
idxMoinsBonCmax = coeff2 * i;
moinsBon = max(moinsBon,tmp[1]);
int coeff3 = min(un,__expf(moinsBon - leMeilleur_g - 1));
int coeff4 = min(1,(leMeilleur_g + 2) ^ 1);
int coeff5 = coeff3 * coeff4;
nbborne_g = nbborne_g + coeff5;
valBorneInf[0] = valBorneInf[0] + coeff5 * moinsBon;
coeffReturn = coeff5;
}
nbborne_g++;
nbFois_g[idxMoinsBonCmax]++;
valBorneInf[0] = moinsBon;
return 0;
}
inline __device__ int calculBorne_g(int* job,int *tempsMachinesFin,int *tempsMachines,int nbAffectDebut,int nbAffectFin,int nbJob_g,int leMeilleur_g, int nbborne_g,int somme_g,int nbElem_g,unsigned char* tempsJob_g,int* nBfois,int* machine_g,int* tabJohnson_g,int* tempsLag_g,int * minTempsArr_g,int* ordoSomme_g,int* minTempsDep_g)
{
int retardNonAff = 0;
int minCmax = 0;
int valBorneInf[2];
int retardNonFin = retardNonAff;
borneInfMakespan_g(job,tempsMachinesFin,tempsMachines,nbAffectDebut,nbAffectFin,valBorneInf,retardNonFin,minCmax,nbJob_g,nbElem_g,leMeilleur_g,nbborne_g,somme_g,minTempsArr_g,
nBfois,machine_g,tabJohnson_g,tempsJob_g,tempsLag_g,ordoSomme_g,minTempsDep_g);
return valBorneInf[0];
}
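// Evaluate_ON_GPU evaluates one branch-and-bound node per thread: thread 0 of each block first
// stages the processing-time matrix into shared memory, then each thread either computes the
// exact makespan (when only one job is still free) or the two-machine lower bound, and writes
// the result into bounds[thread_idx].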
__global__ void Evaluate_ON_GPU(raw_bb_problem* pool_to_evaluate, int* bounds,
int nbJob_g, int nbMachines_g, int nbborne_g, int somme_g, int nbElem_g,
int *nbFois_g, int *machine_g, int *tabJohnson_g, int *tempsJob_g,
int *tempsLag_g, int *minTempsArr_g, int *ordoSomme_g,
int *minTempsDep_g, int todo, int time_seed_g) {
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__
unsigned char tempsJob_g_shared[MAX_NB_MACHINES * MAX_NBJOBS];
if (thread_idx < todo) {
if (threadIdx.x == 0) {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsJob_g_shared[i * nbJob_g + j] = tempsJob_g[i * nbJob_g
+ j];
}
__syncthreads();
int tempsMachines[MAX_NB_MACHINES];
int tempsMachinesFin[MAX_NB_MACHINES];
int job[MAX_NBJOBS];
int jobFin[MAX_NBJOBS];
int nbAffectFin = nbJob_g - pool_to_evaluate[thread_idx].limite2;
int nbAffectDebut = pool_to_evaluate[thread_idx].limite1 + 1;
int leMeilleur_g = 999999;
int borneInf = 0;
if (pool_to_evaluate[thread_idx].limite2
- pool_to_evaluate[thread_idx].limite1 == 1)
borneInf = criteres_calculer_g(
pool_to_evaluate[thread_idx].permutation, nbMachines_g,
nbJob_g, tempsJob_g_shared);
else {
set_tempsMachines_retardDebut_g(tempsMachines,
pool_to_evaluate[thread_idx].permutation,
pool_to_evaluate[thread_idx].limite1, nbMachines_g, nbJob_g,
tempsJob_g_shared);
set_job_jobFin_g(job, jobFin,
pool_to_evaluate[thread_idx].permutation,
pool_to_evaluate[thread_idx].limite1,
pool_to_evaluate[thread_idx].limite2, nbJob_g);
set_tempsMachinesFin_tempsJobFin_g(jobFin, tempsMachinesFin,
nbAffectFin, nbJob_g, nbMachines_g, tempsJob_g_shared);
borneInf = calculBorne_g(job, tempsMachinesFin, tempsMachines,
nbAffectDebut, nbAffectFin, nbJob_g, leMeilleur_g,
nbborne_g, somme_g, nbElem_g, tempsJob_g_shared, nbFois_g,
machine_g, tabJohnson_g, tempsLag_g, minTempsArr_g,
ordoSomme_g, minTempsDep_g);
}
bounds[thread_idx] = borneInf;
}
}
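/*
 * Minimal host-side launch sketch for the kernel above. It assumes the pool of `todo`
 * raw_bb_problem nodes and the result buffer have already been copied to the device
 * (d_pool, d_bounds), and that the d_* pointers are device copies of the host tables filled
 * by init_bound() (nbFois_g, machine_g, tabJohnson_g, tempsJob_g, tempsLag_g, minTempsArr_g,
 * ordoSomme_g, minTempsDep_g). The block size of 128 is an arbitrary choice.
 *
 *   int threads = 128;
 *   int blocks = (todo + threads - 1) / threads;
 *   hipLaunchKernelGGL((Evaluate_ON_GPU), dim3(blocks), dim3(threads), 0, 0,
 *       d_pool, d_bounds, nbJob_g, nbMachines_g, nbborne_g, somme_g, nbElem_g,
 *       d_nbFois, d_machine, d_tabJohnson, d_tempsJob, d_tempsLag,
 *       d_minTempsArr, d_ordoSomme, d_minTempsDep, todo, time_seed_g);
 *   hipDeviceSynchronize();
 */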
| b70779c658c99a2dd0ebeb3807bf66e3d84140fc.cu | void initSomme_g() {
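	// somme_g = nbMachines_g * (nbMachines_g - 1) / 2: the number of machine pairs (m1 < m2),
	// i.e. the number of two-machine subproblems used by the lower bound.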
somme_g = 0;
for (int i = 1; i < nbMachines_g; i++)
somme_g += i;
}
void free_memories() {
for (int i = 0; i < nbMachines_g; i++)
free(tempsFinMachines_g[i]);
free(tempsFinMachines_g);
for (int i = 0; i < somme_g; i++)
free(tempsEntre_g[i]);
free(tempsEntre_g);
free(tempsFin_g);
free(tempsExeTrie_g);
free(tempsEntreTrie_g);
free(tempsFinMachinesTrie_g);
free(tempsLag_g);
free(ordoSomme_g);
free(nbFois_g);
free(tempsFinTrie_g);
free(tabJohnson_g);
free(machine_g);
free(tempsArriver_g);
free(tempsDepart_g);
free(minTempsArr_g);
free(minTempsDep_g);
free(tempsJob_g);
}
void allouerMemoire_g() {
tempsFin_g = (int *) malloc(nbJob_g * sizeof(int));
tempsExeTrie_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
tempsFinMachines_g = (int **) malloc(nbMachines_g * sizeof(int *));
for (int i = 0; i < nbMachines_g; i++)
tempsFinMachines_g[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsEntre_g = (int **) malloc(somme_g * sizeof(int *));
for (int i = 0; i < somme_g; i++)
tempsEntre_g[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsEntreTrie_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
tempsFinMachinesTrie_g = (int *) malloc(
nbMachines_g * nbJob_g * sizeof(int));
tempsLag_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
ordoSomme_g = (int *) malloc(somme_g * sizeof(int));
nbFois_g = (int *) malloc(somme_g * sizeof(int));
tempsFinTrie_g = (int *) malloc(nbJob_g * sizeof(int));
tabJohnson_g = (int *) malloc(somme_g * nbJob_g * sizeof(int));
machine_g = (int *) malloc(2 * somme_g * sizeof(int));
tempsArriver_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
tempsDepart_g = (int *) malloc(nbMachines_g * nbJob_g * sizeof(int));
minTempsArr_g = (int *) malloc(nbMachines_g * sizeof(int));
minTempsDep_g = (int *) malloc(nbMachines_g * sizeof(int));
}
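// aFaire_g evaluates the recurrence f(1) = 1, f(i) = f(i-1) * i + i up to the given depth; it
// estimates the total number of nodes in the permutation search tree and is only used to set
// the nbNoeudTotal_g statistic in init_bound().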
long double aFaire_g(int profondeur) {
long double nbNoeudAFaire = 1;
for (int i = 2; i <= profondeur; i++)
nbNoeudAFaire = (nbNoeudAFaire * i) + i;
return nbNoeudAFaire;
}
void initTempsExeTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsExeTrie_g[i * nbJob_g + j] = j;
}
void initTempsEntreTrie_g() {
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_g[i * nbJob_g + j] = j;
}
void initTempArrays_g() {
tempsExeTrie_T = (int **) malloc(nbMachines_g * sizeof(int *));
tempsFinMachinesTrie_T = (int **) malloc(nbMachines_g * sizeof(int *));
for (int i = 0; i < nbMachines_g; i++) {
tempsExeTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
tempsFinMachinesTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
}
tempsEntreTrie_T = (int **) malloc(somme_g * sizeof(int *));
tabJohnson_T = (int **) malloc(somme_g * sizeof(int *));
for (int i = 0; i < somme_g; i++) {
tempsEntreTrie_T[i] = (int *) malloc(nbJob_g * sizeof(int));
tabJohnson_T[i] = (int *) malloc(nbJob_g * sizeof(int));
}
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_T[i][j] = j;
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++) {
tempsExeTrie_T[i][j] = j;
tempsFinMachinesTrie_T[i][j] = j;
}
}
void initTempsFinTrie_g() {
for (int i = 0; i < nbJob_g; i++)
tempsFinTrie_g[i] = i;
}
void initTempsFinMachinesTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachinesTrie_g[i * nbJob_g + j] = j;
}
int estInf_g(int i, int j) {
if (pluspetit_g[0][i] == pluspetit_g[0][j]) {
if (pluspetit_g[0][i] == 1)
return pluspetit_g[1][i] < pluspetit_g[1][j];
return pluspetit_g[1][i] > pluspetit_g[1][j];
}
return pluspetit_g[0][i] < pluspetit_g[0][j];
}
int estSup_g(int i, int j) {
if (pluspetit_g[0][i] == pluspetit_g[0][j]) {
if (pluspetit_g[0][i] == 1)
return pluspetit_g[1][i] > pluspetit_g[1][j];
return pluspetit_g[1][i] < pluspetit_g[1][j];
}
return pluspetit_g[0][i] > pluspetit_g[0][j];
}
int partionner_g(int *ordo, int deb, int fin) {
int d = deb - 1;
int f = fin + 1;
int mem, pivot = ordo[deb];
do {
do {
f--;
} while (estSup_g(ordo[f], pivot));
do {
d++;
} while (estInf_g(ordo[d], pivot));
if (d < f) {
mem = ordo[d];
ordo[d] = ordo[f];
ordo[f] = mem;
}
} while (d < f);
return f;
}
void quicksort_g(int *ordo, int deb, int fin) {
int k;
if ((fin - deb) > 0) {
k = partionner_g(ordo, deb, fin);
quicksort_g(ordo, deb, k);
quicksort_g(ordo, k + 1, fin);
}
}
void trierTableau_g(int *ordo, int nbElem_g, int *nbFois_g, bool croissant) {
pluspetit_g[0] = (int *) malloc((nbElem_g) * sizeof(int));
pluspetit_g[1] = (int *) malloc((nbElem_g) * sizeof(int));
for (int i = 0; i < nbElem_g; i++) {
if (croissant)
pluspetit_g[0][i] = 1;
else
pluspetit_g[0][i] = 2;
pluspetit_g[1][i] = nbFois_g[i];
}
quicksort_g(ordo, 0, (nbElem_g - 1));
free(pluspetit_g[0]);
free(pluspetit_g[1]);
}
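// remplirTempsExeTrie_g sorts, for every machine, the job indices by increasing processing time
// on that machine (tempsExeTrie_T holds the per-machine permutation, flattened into
// tempsExeTrie_g).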
void remplirTempsExeTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
trierTableau_g(tempsExeTrie_T[i], nbJob_g, tempsJob_T[i], true);
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsExeTrie_g[i * nbJob_g + j] = tempsExeTrie_T[i][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsExeTrie_T[i]);
free(tempsExeTrie_T);
}
void remplirTempsArriverDepart_g() {
bool faitDep[nbMachines_g];
bool faitArr[nbMachines_g];
int machine_g;
for (int k = 1; k < nbMachines_g; k++) {
faitArr[k] = false;
faitDep[k] = false;
}
minTempsDep_g[nbMachines_g - 1] = 0;
minTempsArr_g[0] = 0;
for (int i = 0; i < nbJob_g; i++) {
tempsArriver_g[i] = 0;
tempsDepart_g[(nbMachines_g - 1) * nbJob_g + i] = 0;
for (int k = 1; k < nbMachines_g; k++) {
machine_g = nbMachines_g - k - 1;
tempsArriver_g[k * nbJob_g + i] = tempsArriver_g[(k - 1) * nbJob_g
+ i] + tempsJob_g[(k - 1) * nbJob_g + i];
tempsDepart_g[machine_g * nbJob_g + i] = tempsDepart_g[(machine_g
+ 1) * nbJob_g + i]
+ tempsJob_g[(machine_g + 1) * nbJob_g + i];
if (!faitArr[k]
|| minTempsArr_g[k] > tempsArriver_g[k * nbJob_g + i]) {
faitArr[k] = true;
minTempsArr_g[k] = tempsArriver_g[k * nbJob_g + i];
}
if (!faitDep[k]
|| minTempsDep_g[machine_g]
> tempsDepart_g[machine_g * nbJob_g + i]) {
faitDep[k] = true;
minTempsDep_g[machine_g] =
tempsDepart_g[machine_g * nbJob_g + i];
}
}
}
}
void remplirMachine_g() {
int cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
machine_g[cmpt] = i;
cmpt++;
}
int taille = cmpt;
cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
machine_g[1 * taille + cmpt] = j;
cmpt++;
}
}
void remplirLag_g() {
int m1, m2;
for (int i = 0; i < somme_g; i++) {
m1 = machine_g[0 * somme_g + i];
m2 = machine_g[1 * somme_g + i];
for (int j = 0; j < nbJob_g; j++) {
tempsLag_g[i * nbJob_g + j] = 0;
for (int k = m1 + 1; k < m2; k++)
tempsLag_g[i * nbJob_g + j] += tempsJob_g[k * nbJob_g + j];
tempsEntre_g[i][j] = tempsLag_g[i * nbJob_g + j]
+ tempsJob_g[m2 * nbJob_g + j];
}
}
}
void remplirTempsEntreTrie_g() {
for (int i = 0; i < somme_g; i++)
trierTableau_g(tempsEntreTrie_T[i], nbJob_g, tempsEntre_g[i], true);
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsEntreTrie_g[i * nbJob_g + j] = tempsEntreTrie_T[i][j];
for (int i = 0; i < somme_g; i++)
free(tempsEntreTrie_T[i]);
free(tempsEntreTrie_T);
}
void Johnson_g(int *ordo, int m1, int m2, int s) {
pluspetit_g[0] = (int *) malloc(nbJob_g * sizeof(int));
pluspetit_g[1] = (int *) malloc(nbJob_g * sizeof(int));
for (int i = 0; i < nbJob_g; i++) {
ordo[i] = i;
if (tempsJob_g[m1 * nbJob_g + i] < tempsJob_g[m2 * nbJob_g + i]) {
pluspetit_g[0][i] = 1;
pluspetit_g[1][i] = tempsJob_g[m1 * nbJob_g + i]
+ tempsLag_g[s * nbJob_g + i];
} else {
pluspetit_g[0][i] = 2;
pluspetit_g[1][i] = tempsJob_g[m2 * nbJob_g + i]
+ tempsLag_g[s * nbJob_g + i];
}
}
quicksort_g(ordo, 0, (nbJob_g - 1));
free(pluspetit_g[0]);
free(pluspetit_g[1]);
}
void remplirTabJohnson_g() {
int cmpt = 0;
for (int i = 0; i < (nbMachines_g - 1); i++)
for (int j = i + 1; j < nbMachines_g; j++) {
Johnson_g(tabJohnson_T[cmpt], i, j, cmpt);
cmpt++;
}
for (int i = 0; i < somme_g; i++)
for (int j = 0; j < nbJob_g; j++)
tabJohnson_g[i * nbJob_g + j] = tabJohnson_T[i][j];
for (int i = 0; i < somme_g; i++)
free(tabJohnson_T[i]);
free(tabJohnson_T);
}
void remplirTempsFinMachines_g() {
for (int i = 0; i < nbJob_g; i++)
tempsFinMachines_g[nbMachines_g - 1][i] = tempsFin_g[i];
for (int i = nbMachines_g - 2; i >= 0; i--)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachines_g[i][j] = tempsFinMachines_g[i + 1][j]
- tempsJob_T[i + 1][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsJob_T[i]);
free(tempsJob_T);
}
void remplirTempsFinMachinesTrie_g() {
for (int i = 0; i < nbMachines_g; i++)
trierTableau_g(tempsFinMachinesTrie_T[i], nbJob_g,
tempsFinMachines_g[i], true);
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsFinMachinesTrie_g[i * nbJob_g + j] =
tempsFinMachinesTrie_T[i][j];
for (int i = 0; i < nbMachines_g; i++)
free(tempsFinMachinesTrie_T[i]);
free(tempsFinMachinesTrie_T);
}
void initNbFois_g() {
for (int i = 0; i < somme_g; i++) {
ordoSomme_g[i] = i;
nbFois_g[i] = 0;
}
}
void initialiserVar_g() {
initSomme_g();
allouerMemoire_g();
initTempArrays_g();
initTempsFinTrie_g();
initTempsExeTrie_g();
initTempsEntreTrie_g();
initTempsFinMachinesTrie_g();
trierTableau_g(tempsFinTrie_g, nbJob_g, tempsFin_g, true);
remplirTempsExeTrie_g();
remplirTempsArriverDepart_g();
remplirMachine_g();
remplirLag_g();
remplirTempsEntreTrie_g();
remplirTabJohnson_g();
remplirTempsFinMachines_g();
remplirTempsFinMachinesTrie_g();
initNbFois_g();
}
void init_bound() {
nbOrdo_g = 1;
nbborne_g = 1;
nbNoeud_g = 0;
nbPartition_g = 10;
nbRetardNonAffB1_g = 0;
nbRetardNonAffB2_g = 0;
nbZero_g = 0;
seuil_g = 0;
initialiserVar_g();
nbElem_g = somme_g;
nbNoeudTotal_g = aFaire_g(nbJob_g);
}
void init_problem_device_g(problem_d &p) {
p.limite1 = -1;
p.limite2 = nbJob_g;
for (int j = 0; j < nbJob_g; j++)
p.permutation[j] = j;
p.couts_somme = 0;
p.depth = 0;
}
inline __device__ void set_job_jobFin_g(int* job, int* jobFin, int* permutation, int limite1, int limite2,int nbJob_g)
{
int j = 0;
for(j = 0;j <= limite1;j++) job[permutation[j]] = j + 1;
for(j = limite1 + 1;j < limite2;j++) job[permutation[j]] = 0;
for(j = limite2; j < nbJob_g;j++)
{
job[permutation[j]] = j + 1;
jobFin[j] = permutation[j];
}
}
inline __device__ int cmaxFin_g(int *tempsMachinesFin, int *tmp, int *ma)
{
return max(tmp[1] + tempsMachinesFin[ma[1]],tmp[0] + tempsMachinesFin[ma[0]]);
}
inline __device__ void initCmax_g(int* tempsMachines,int nbAffectDebut, int *tmp, int *ma, int ind,int * machine_g, int somme_g, int * minTempsArr_g)
{
ma[0] = machine_g[ind];
ma[1] = machine_g[1 * somme_g + ind];
int coeff = __cosf(nbAffectDebut);
tmp[0] = (1 - coeff) * tempsMachines[ma[0]] + coeff * minTempsArr_g[ma[0]];
tmp[1] = (1 - coeff) * tempsMachines[ma[1]] + coeff * minTempsArr_g[ma[1]];
}
inline __device__ void heuristiqueCmax_g(int* job, int *tmp, int *ma, int ind, int nbJob_g, int * tabJohnson_g, unsigned char* tempsJob_g,int * tempsLag_g)
{
int jobCour;
for(int j= 0; j < nbJob_g; j++)
{
jobCour = tabJohnson_g[ind * nbJob_g + j];
if( job[jobCour] == 0 )
{
tmp[0] = tmp[0] + tempsJob_g[ma[0] * nbJob_g + jobCour];
tmp[1] = max (tmp[1], tmp[0] + tempsLag_g[ind * nbJob_g + jobCour]) + tempsJob_g[ma[1] * nbJob_g + jobCour];
}
}
}
inline __device__ int criteres_calculer_g(int* permutation,int nbMachines_g,int nbJob_g,unsigned char* tempsJob_g)
{
int temps[MAX_NB_MACHINES];
for(int mm = 0; mm < nbMachines_g; mm++) temps[mm] = 0;
for(int j = 0; j < nbJob_g;j++)
{
int job = permutation[j];
temps[0] = temps[0] + tempsJob_g[job];
for(int m = 1; m < nbMachines_g;m++)
temps[m] = max(temps[m],temps[m-1]) + tempsJob_g[m * nbJob_g + job];
}
return temps[nbMachines_g-1];
}
inline __device__ void set_tempsMachines_retardDebut_g(int *tempsMachines, int* permutation, int limite1,int nbMachines_g,int nbJob_g,unsigned char* tempsJob_g)
{
int m = 0;
for(m = 0; m < nbMachines_g; m++) tempsMachines[m] = 0;
for(int j = 0; j <= limite1; j++)
{
int job = permutation[j];
tempsMachines[0] = tempsMachines[0] + tempsJob_g[job];
for(m = 1; m < nbMachines_g;m++)
tempsMachines[m] = max(tempsMachines[m],tempsMachines[m-1]) + tempsJob_g[m * nbJob_g + job];
}
}
inline __device__ void set_tempsMachinesFin_tempsJobFin_g(int* jobFin, int * tempsMachinesFin,int nbAffectFin,int nbJob_g, int nbMachines_g,unsigned char* tempsJob_g)
{
int jobCour;
int tmpMa[MAX_NB_MACHINES];
for(int j = 0; j < nbMachines_g; j++)
{
for(int k = j; k < nbMachines_g; k++) tmpMa[k] = 0;
for(int k = nbJob_g - nbAffectFin; k < nbJob_g; k++)
{
jobCour = jobFin[k];
tmpMa[j] = tmpMa[j] + tempsJob_g[j * nbJob_g + jobCour];
for(int l = j + 1; l < nbMachines_g; l++)
{
tmpMa[l] = max (tmpMa[l-1],tmpMa[l]);
tmpMa[l] = tmpMa[l] + tempsJob_g[l * nbJob_g + jobCour];
}
}
tempsMachinesFin[j] = tmpMa[nbMachines_g-1];
}
}
inline __device__ int borneInfMakespan_g(int* job,int *tempsMachinesFin,int* tempsMachines,
int nbAffectDebut,int nbAffectFin,int *valBorneInf, int retardNonFin, int minCmax,int nbJob_g, int nbElem_g,
int leMeilleur_g, int nbborne_g, int somme_g, int * minTempsArr_g, int*nbFois_g, int *machine_g, int * tabJohnson_g,
unsigned char* tempsJob_g, int* tempsLag_g, int* ordoSomme_g, int* minTempsDep_g)
{
int moinsBon = 0;
int idxMoinsBonCmax, i, tmpDep, retard = retardNonFin;
int ma[2] = {0,0};
int tmp[2] = {0,0};
int res[2] = {0,0};
int l;
int coeffReturn=1;
for(l = 0; l < nbElem_g; l++)
{
i = ordoSomme_g[l];
initCmax_g(tempsMachines,nbAffectDebut,tmp,ma,i,machine_g,somme_g,minTempsArr_g);
heuristiqueCmax_g(job, tmp, ma, i, nbJob_g,tabJohnson_g, tempsJob_g, tempsLag_g);
if( nbAffectFin != 0 ) tmp[1] = cmaxFin_g(tempsMachinesFin, tmp, ma);
else tmp[1] = tmp[1] + minTempsDep_g[ma[1]];
float un = 1;
int coeff2 = min(un, __expf(tmp[1] - moinsBon - 1));
idxMoinsBonCmax = coeff2 * i;
moinsBon = max(moinsBon,tmp[1]);
int coeff3 = min(un,__expf(moinsBon - leMeilleur_g - 1));
int coeff4 = min(1,(leMeilleur_g + 2) ^ 1);
int coeff5 = coeff3 * coeff4;
nbborne_g = nbborne_g + coeff5;
valBorneInf[0] = valBorneInf[0] + coeff5 * moinsBon;
coeffReturn = coeff5;
}
nbborne_g++;
nbFois_g[idxMoinsBonCmax]++;
valBorneInf[0] = moinsBon;
return 0;
}
inline __device__ int calculBorne_g(int* job,int *tempsMachinesFin,int *tempsMachines,int nbAffectDebut,int nbAffectFin,int nbJob_g,int leMeilleur_g, int nbborne_g,int somme_g,int nbElem_g,unsigned char* tempsJob_g,int* nBfois,int* machine_g,int* tabJohnson_g,int* tempsLag_g,int * minTempsArr_g,int* ordoSomme_g,int* minTempsDep_g)
{
int retardNonAff = 0;
int minCmax = 0;
int valBorneInf[2];
int retardNonFin = retardNonAff;
borneInfMakespan_g(job,tempsMachinesFin,tempsMachines,nbAffectDebut,nbAffectFin,valBorneInf,retardNonFin,minCmax,nbJob_g,nbElem_g,leMeilleur_g,nbborne_g,somme_g,minTempsArr_g,
nBfois,machine_g,tabJohnson_g,tempsJob_g,tempsLag_g,ordoSomme_g,minTempsDep_g);
return valBorneInf[0];
}
__global__ void Evaluate_ON_GPU(raw_bb_problem* pool_to_evaluate, int* bounds,
int nbJob_g, int nbMachines_g, int nbborne_g, int somme_g, int nbElem_g,
int *nbFois_g, int *machine_g, int *tabJohnson_g, int *tempsJob_g,
int *tempsLag_g, int *minTempsArr_g, int *ordoSomme_g,
int *minTempsDep_g, int todo, int time_seed_g) {
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__
unsigned char tempsJob_g_shared[MAX_NB_MACHINES * MAX_NBJOBS];
if (thread_idx < todo) {
if (threadIdx.x == 0) {
for (int i = 0; i < nbMachines_g; i++)
for (int j = 0; j < nbJob_g; j++)
tempsJob_g_shared[i * nbJob_g + j] = tempsJob_g[i * nbJob_g
+ j];
}
__syncthreads();
int tempsMachines[MAX_NB_MACHINES];
int tempsMachinesFin[MAX_NB_MACHINES];
int job[MAX_NBJOBS];
int jobFin[MAX_NBJOBS];
int nbAffectFin = nbJob_g - pool_to_evaluate[thread_idx].limite2;
int nbAffectDebut = pool_to_evaluate[thread_idx].limite1 + 1;
int leMeilleur_g = 999999;
int borneInf = 0;
if (pool_to_evaluate[thread_idx].limite2
- pool_to_evaluate[thread_idx].limite1 == 1)
borneInf = criteres_calculer_g(
pool_to_evaluate[thread_idx].permutation, nbMachines_g,
nbJob_g, tempsJob_g_shared);
else {
set_tempsMachines_retardDebut_g(tempsMachines,
pool_to_evaluate[thread_idx].permutation,
pool_to_evaluate[thread_idx].limite1, nbMachines_g, nbJob_g,
tempsJob_g_shared);
set_job_jobFin_g(job, jobFin,
pool_to_evaluate[thread_idx].permutation,
pool_to_evaluate[thread_idx].limite1,
pool_to_evaluate[thread_idx].limite2, nbJob_g);
set_tempsMachinesFin_tempsJobFin_g(jobFin, tempsMachinesFin,
nbAffectFin, nbJob_g, nbMachines_g, tempsJob_g_shared);
borneInf = calculBorne_g(job, tempsMachinesFin, tempsMachines,
nbAffectDebut, nbAffectFin, nbJob_g, leMeilleur_g,
nbborne_g, somme_g, nbElem_g, tempsJob_g_shared, nbFois_g,
machine_g, tabJohnson_g, tempsLag_g, minTempsArr_g,
ordoSomme_g, minTempsDep_g);
}
bounds[thread_idx] = borneInf;
}
}
|
dde68ae7449be497af38b126a0de260d63cfbfe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <thrust/fill.h>
#include <tuple>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/copying.hpp>
#include <cudf/cudf.h>
#include <cudf/groupby.hpp>
#include <cudf/legacy/bitmask.hpp>
#include <cudf/legacy/table.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include "groupby/common/aggregation_requests.hpp"
#include "groupby/common/type_info.hpp"
#include "groupby/common/utils.hpp"
#include "groupby_kernels.cuh"
#include "sort_helper.hpp"
#include <quantiles/group_quantiles.hpp>
#include <utilities/integer_utils.hpp>
namespace cudf {
namespace groupby {
namespace sort {
using index_vector = rmm::device_vector<gdf_size_type>;
namespace {
/**---------------------------------------------------------------------------*
* @brief Computes the ordered aggregation requests which were skipped
 * in a previous step (`compound_to_simple`). These ordered aggregations
 * were skipped because they cannot be decomposed into simple aggregations.
 *
 * Then combine these results with the set of output aggregation columns
 * corresponding to the non-ordered aggregation requests.
*
* @param groupby[in] The object for computing sort-based groupby
* @param original_requests[in] The original set of potentially ordered
* aggregation requests
 * @param input_ops_args[in] The list of arguments for each of the previous ordered
 * aggregation requests
 * @param current_output_values[in] Set of output aggregation columns corresponding to
 * the non-ordered aggregation requests
* @param stream[in] CUDA stream on which to execute
* @return vector of columns satisfying each of the original aggregation requests
*---------------------------------------------------------------------------**/
std::vector<gdf_column*> compute_ordered_aggregations(
detail::helper &groupby,
std::vector<AggRequestType> const& original_requests,
std::vector<operation_args*> const& input_ops_args,
cudf::table& current_output_values,
hipStream_t stream) {
std::vector<gdf_column*> output_value(original_requests.size());
std::copy(current_output_values.begin(), current_output_values.end(), output_value.begin());
for (size_t i = 0; i < original_requests.size(); ++i) {
auto const& element = original_requests[i];
if (is_ordered(element.second)) {
gdf_column * value_col = element.first;
gdf_column sorted_values;
rmm::device_vector<gdf_size_type> group_sizes;
std::tie(sorted_values, group_sizes) = groupby.sort_values(*value_col);
auto result_col = new gdf_column;
switch (element.second) {
case MEDIAN: {
*result_col = cudf::allocate_column(
GDF_FLOAT64, groupby.num_groups(), false);
cudf::detail::group_medians(sorted_values, groupby.group_offsets(),
group_sizes, result_col, stream);
break;
}
case QUANTILE: {
quantile_args * args = static_cast<quantile_args*>(input_ops_args[i]);
*result_col = cudf::allocate_column(
GDF_FLOAT64, args->quantiles.size() * groupby.num_groups(), false);
cudf::detail::group_quantiles(sorted_values, groupby.group_offsets(),
group_sizes, result_col,
args->quantiles, args->interpolation,
stream);
break;
}
default:
break;
}
output_value[i] = result_col;
gdf_column_free(&sorted_values);
}
}
return output_value;
}
/**---------------------------------------------------------------------------*
* @brief Prepare input parameters for invoking the `aggregate_all_rows` kernel
 * which computes the simple aggregation(s) of the corresponding rows into the output
* `values` table.
* @param input_keys The table of keys
* @param options The options for controlling behavior of the groupby operation.
* @param groupby The object for computing sort-based groupby
* @param simple_values_columns The list of simple values columns
* @param simple_operators The list of simple aggregation operations
* @param stream[in] CUDA stream on which to execute
* @return output value table with the aggregation(s) computed
*---------------------------------------------------------------------------**/
template <bool keys_have_nulls, bool values_have_nulls>
cudf::table compute_simple_aggregations(const cudf::table &input_keys,
const Options &options,
detail::helper &groupby,
const std::vector<gdf_column *> &simple_values_columns,
const std::vector<operators> &simple_operators,
hipStream_t &stream) {
const gdf_column& key_sorted_order = groupby.key_sort_order();
//group_labels
const index_vector& group_labels = groupby.group_labels();
const gdf_size_type num_groups = groupby.num_groups();
// Output allocation size aligned to 4 bytes. The use of `round_up_safe`
  // guarantees correct execution with cuda-memcheck for cases when
  // num_groups == 1 and dtype == int8.
gdf_size_type const output_size_estimate = cudf::util::round_up_safe((int64_t)groupby.num_groups(), (int64_t)sizeof(int32_t));
cudf::table simple_values_table{simple_values_columns};
cudf::table simple_output_values{
output_size_estimate, target_dtypes(column_dtypes(simple_values_table), simple_operators),
column_dtype_infos(simple_values_table), values_have_nulls, false, stream};
initialize_with_identity(simple_output_values, simple_operators, stream);
auto d_input_values = device_table::create(simple_values_table);
auto d_output_values = device_table::create(simple_output_values, stream);
rmm::device_vector<operators> d_ops(simple_operators);
auto row_bitmask = cudf::row_bitmask(input_keys, stream);
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
//Aggregate all rows for simple requests using the key sorted order (indices) and the group labels
hipLaunchKernelGGL(( cudf::groupby::sort::aggregate_all_rows<keys_have_nulls, values_have_nulls>),
dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0, stream,
*d_input_values, *d_output_values,
static_cast<gdf_index_type const*>(key_sorted_order.data),
group_labels.data().get(), options.ignore_null_keys,
d_ops.data().get(), row_bitmask.data().get());
std::transform(simple_output_values.begin(), simple_output_values.end(), simple_output_values.begin(),
[num_groups](gdf_column *col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
col->size = num_groups;
return col;
});
return simple_output_values;
}
template <bool keys_have_nulls, bool values_have_nulls>
std::pair<cudf::table, std::vector<gdf_column*>> compute_sort_groupby(cudf::table const& input_keys, cudf::table const& input_values,
std::vector<operators> const& input_ops,
std::vector<operation_args*> const& input_ops_args,
Options options,
hipStream_t stream) {
auto include_nulls = not options.ignore_null_keys;
auto groupby = detail::helper(input_keys, include_nulls, options.null_sort_behavior, options.input_sorted);
if (groupby.num_groups() == 0) {
cudf::table output_values(0, target_dtypes(column_dtypes(input_values), input_ops), column_dtype_infos(input_values));
return std::make_pair(
cudf::empty_like(input_keys),
std::vector<gdf_column*>{output_values.begin(), output_values.end()}
);
}
gdf_size_type num_groups = groupby.num_groups();
// An "aggregation request" is the combination of a `gdf_column*` to a column
// of values, and an aggregation operation enum indicating the aggregation
// requested to be performed on the column
std::vector<AggRequestType> original_requests(input_values.num_columns());
std::transform(input_values.begin(), input_values.end(), input_ops.begin(),
original_requests.begin(),
[](gdf_column const* col, operators op) {
return std::make_pair(const_cast<gdf_column*>(col), op);
});
  // Some aggregations are "compound", meaning they need to be satisfied via the
// composition of 1 or more "simple" aggregation requests. For example, MEAN
// is satisfied via the division of the SUM by the COUNT aggregation. We
// translate these compound requests into simple requests, and compute the
// groupby operation for these simple requests. Later, we translate the simple
// requests back to compound request results.
std::vector<SimpleAggRequestCounter> simple_requests =
compound_to_simple(original_requests);
std::vector<gdf_column*> simple_values_columns;
std::vector<operators> simple_operators;
for (auto const& p : simple_requests) {
const AggRequestType& agg_req_type = p.first;
simple_values_columns.push_back(
const_cast<gdf_column*>(agg_req_type.first));
simple_operators.push_back(agg_req_type.second);
}
// If there are "simple" aggregation requests, compute the aggregations
cudf::table current_output_values{};
if (simple_values_columns.size() > 0) {
// Step 1: Aggregate all rows for simple requests
cudf::table simple_output_values = compute_simple_aggregations<keys_have_nulls, values_have_nulls>(input_keys,
options,
groupby,
simple_values_columns,
simple_operators,
stream);
// Step 2: If any of the original requests were compound, compute them from the
// results of simple aggregation requests
current_output_values = compute_original_requests(original_requests, simple_requests, simple_output_values, stream);
}
// If there are "ordered" aggregation requests like MEDIAN, QUANTILE, compute these aggregations
std::vector<gdf_column*> final_output_values = compute_ordered_aggregations(groupby, original_requests, input_ops_args, current_output_values, stream);
// Update size and null count of output columns
std::transform(final_output_values.begin(), final_output_values.end(), final_output_values.begin(),
[num_groups](gdf_column *col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
set_null_count(*col);
return col;
});
return std::make_pair(groupby.unique_keys(), final_output_values);
}
/**---------------------------------------------------------------------------*
* @brief Returns appropriate callable instantiation of `compute_sort_groupby`
* based on presence of null values in keys and values.
*
* @param keys The groupby key columns
* @param values The groupby value columns
* @return Instantiated callable of compute_sort_groupby
*---------------------------------------------------------------------------**/
auto groupby_null_specialization(table const& keys, table const& values) {
if (cudf::has_nulls(keys)) {
if (cudf::has_nulls(values)) {
return compute_sort_groupby<true, true>;
} else {
return compute_sort_groupby<true, false>;
}
} else {
if (cudf::has_nulls(values)) {
return compute_sort_groupby<false, true>;
} else {
return compute_sort_groupby<false, false>;
}
}
}
} // anonymous namespace
namespace detail {
/**---------------------------------------------------------------------------*
* @brief Verifies the requested aggregation is valid for the arguments of the
* operator.
*
* @throw cudf::logic_error if an invalid combination of argument and operator
* is requested.
*
* @param ops The aggregation operators
 * @param args The aggregation arguments
*---------------------------------------------------------------------------**/
static void verify_operators_with_arguments(std::vector<operators> const& ops, std::vector<operation_args*> const& args) {
CUDF_EXPECTS(ops.size() == args.size(),
"Size mismatch between ops and args");
for (size_t i = 0; i < ops.size(); i++) {
if(ops[i] == QUANTILE) {
quantile_args* q_args = static_cast<quantile_args*>(args[i]);
if (q_args == nullptr or q_args->quantiles.size() == 0) {
CUDF_FAIL(
"Missing quantile aggregation arguments.");
}
}
}
}
std::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operation> const& ops,
Options options,
hipStream_t stream = 0) {
CUDF_EXPECTS(keys.num_rows() == values.num_rows(),
"Size mismatch between number of rows in keys and values.");
std::vector<operators> optype_list(ops.size());
std::transform(ops.begin(), ops.end(), optype_list.begin(), [](auto const& op) {
return op.op_name;
});
std::vector<operation_args*> ops_args(ops.size());
std::transform(ops.begin(), ops.end(), ops_args.begin(), [](auto const& op) {
return op.args.get();
});
verify_operators(values, optype_list);
verify_operators_with_arguments(optype_list, ops_args);
// Empty inputs
if (keys.num_rows() == 0) {
cudf::table output_values(0, target_dtypes(column_dtypes(values), optype_list), column_dtype_infos(values));
return std::make_pair(
cudf::empty_like(keys),
std::vector<gdf_column*>{output_values.begin(), output_values.end()}
);
}
auto compute_groupby = groupby_null_specialization(keys, values);
cudf::table output_keys;
std::vector<gdf_column*> output_values;
std::tie(output_keys, output_values) =
compute_groupby(keys, values, optype_list, ops_args, options, stream);
cudf::table table_output_values(output_values);
update_nvcategories(keys, output_keys, values, table_output_values);
return std::make_pair(output_keys, output_values);
}
} // namespace detail
std::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const &keys,
cudf::table const &values,
std::vector<operation> const &ops,
Options options) {
return detail::groupby(keys, values, ops, options);
}
} // END: namespace sort
} // END: namespace groupby
} // END: namespace cudf
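// Rough usage sketch of the sort-based groupby entry point above. It assumes `keys` and
// `values` are already-populated cudf::table objects with one aggregation per value column;
// the exact spelling of the `operation` aggregate, the `Options` constructor and the operator
// enum values lives in cudf/groupby.hpp and may differ.
//
//   std::vector<cudf::groupby::sort::operation> ops;
//   ops.push_back({MEDIAN, nullptr}); // one operation per value column
//   cudf::groupby::sort::Options options;
//   cudf::table out_keys;
//   std::vector<gdf_column*> out_values;
//   std::tie(out_keys, out_values) =
//       cudf::groupby::sort::groupby(keys, values, ops, options);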
| dde68ae7449be497af38b126a0de260d63cfbfe4.cu | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <thrust/fill.h>
#include <tuple>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/copying.hpp>
#include <cudf/cudf.h>
#include <cudf/groupby.hpp>
#include <cudf/legacy/bitmask.hpp>
#include <cudf/legacy/table.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include "groupby/common/aggregation_requests.hpp"
#include "groupby/common/type_info.hpp"
#include "groupby/common/utils.hpp"
#include "groupby_kernels.cuh"
#include "sort_helper.hpp"
#include <quantiles/group_quantiles.hpp>
#include <utilities/integer_utils.hpp>
namespace cudf {
namespace groupby {
namespace sort {
using index_vector = rmm::device_vector<gdf_size_type>;
namespace {
/**---------------------------------------------------------------------------*
* @brief Computes the ordered aggregation requests which were skipped
 * in a previous step (`compound_to_simple`). These ordered aggregations
 * were skipped because they cannot be decomposed into simple aggregations.
 *
 * Then combine these results with the set of output aggregation columns
 * corresponding to the non-ordered aggregation requests.
*
* @param groupby[in] The object for computing sort-based groupby
* @param original_requests[in] The original set of potentially ordered
* aggregation requests
 * @param input_ops_args[in] The list of arguments for each of the previous ordered
 * aggregation requests
 * @param current_output_values[in] Set of output aggregation columns corresponding to
 * the non-ordered aggregation requests
* @param stream[in] CUDA stream on which to execute
* @return vector of columns satisfying each of the original aggregation requests
*---------------------------------------------------------------------------**/
std::vector<gdf_column*> compute_ordered_aggregations(
detail::helper &groupby,
std::vector<AggRequestType> const& original_requests,
std::vector<operation_args*> const& input_ops_args,
cudf::table& current_output_values,
cudaStream_t stream) {
std::vector<gdf_column*> output_value(original_requests.size());
std::copy(current_output_values.begin(), current_output_values.end(), output_value.begin());
for (size_t i = 0; i < original_requests.size(); ++i) {
auto const& element = original_requests[i];
if (is_ordered(element.second)) {
gdf_column * value_col = element.first;
gdf_column sorted_values;
rmm::device_vector<gdf_size_type> group_sizes;
std::tie(sorted_values, group_sizes) = groupby.sort_values(*value_col);
auto result_col = new gdf_column;
switch (element.second) {
case MEDIAN: {
*result_col = cudf::allocate_column(
GDF_FLOAT64, groupby.num_groups(), false);
cudf::detail::group_medians(sorted_values, groupby.group_offsets(),
group_sizes, result_col, stream);
break;
}
case QUANTILE: {
quantile_args * args = static_cast<quantile_args*>(input_ops_args[i]);
*result_col = cudf::allocate_column(
GDF_FLOAT64, args->quantiles.size() * groupby.num_groups(), false);
cudf::detail::group_quantiles(sorted_values, groupby.group_offsets(),
group_sizes, result_col,
args->quantiles, args->interpolation,
stream);
break;
}
default:
break;
}
output_value[i] = result_col;
gdf_column_free(&sorted_values);
}
}
return output_value;
}
/**---------------------------------------------------------------------------*
* @brief Prepare input parameters for invoking the `aggregate_all_rows` kernel
 * which computes the simple aggregation(s) of the corresponding rows into the output
* `values` table.
* @param input_keys The table of keys
* @param options The options for controlling behavior of the groupby operation.
* @param groupby The object for computing sort-based groupby
* @param simple_values_columns The list of simple values columns
* @param simple_operators The list of simple aggregation operations
* @param stream[in] CUDA stream on which to execute
* @return output value table with the aggregation(s) computed
*---------------------------------------------------------------------------**/
template <bool keys_have_nulls, bool values_have_nulls>
cudf::table compute_simple_aggregations(const cudf::table &input_keys,
const Options &options,
detail::helper &groupby,
const std::vector<gdf_column *> &simple_values_columns,
const std::vector<operators> &simple_operators,
cudaStream_t &stream) {
const gdf_column& key_sorted_order = groupby.key_sort_order();
//group_labels
const index_vector& group_labels = groupby.group_labels();
const gdf_size_type num_groups = groupby.num_groups();
// Output allocation size aligned to 4 bytes. The use of `round_up_safe`
  // guarantees correct execution with cuda-memcheck for cases when
  // num_groups == 1 and dtype == int8.
gdf_size_type const output_size_estimate = cudf::util::round_up_safe((int64_t)groupby.num_groups(), (int64_t)sizeof(int32_t));
cudf::table simple_values_table{simple_values_columns};
cudf::table simple_output_values{
output_size_estimate, target_dtypes(column_dtypes(simple_values_table), simple_operators),
column_dtype_infos(simple_values_table), values_have_nulls, false, stream};
initialize_with_identity(simple_output_values, simple_operators, stream);
auto d_input_values = device_table::create(simple_values_table);
auto d_output_values = device_table::create(simple_output_values, stream);
rmm::device_vector<operators> d_ops(simple_operators);
auto row_bitmask = cudf::row_bitmask(input_keys, stream);
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
//Aggregate all rows for simple requests using the key sorted order (indices) and the group labels
cudf::groupby::sort::aggregate_all_rows<keys_have_nulls, values_have_nulls><<<
grid_params.num_blocks, grid_params.num_threads_per_block, 0, stream>>>(
*d_input_values, *d_output_values,
static_cast<gdf_index_type const*>(key_sorted_order.data),
group_labels.data().get(), options.ignore_null_keys,
d_ops.data().get(), row_bitmask.data().get());
std::transform(simple_output_values.begin(), simple_output_values.end(), simple_output_values.begin(),
[num_groups](gdf_column *col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
col->size = num_groups;
return col;
});
return simple_output_values;
}
template <bool keys_have_nulls, bool values_have_nulls>
std::pair<cudf::table, std::vector<gdf_column*>> compute_sort_groupby(cudf::table const& input_keys, cudf::table const& input_values,
std::vector<operators> const& input_ops,
std::vector<operation_args*> const& input_ops_args,
Options options,
cudaStream_t stream) {
auto include_nulls = not options.ignore_null_keys;
auto groupby = detail::helper(input_keys, include_nulls, options.null_sort_behavior, options.input_sorted);
if (groupby.num_groups() == 0) {
cudf::table output_values(0, target_dtypes(column_dtypes(input_values), input_ops), column_dtype_infos(input_values));
return std::make_pair(
cudf::empty_like(input_keys),
std::vector<gdf_column*>{output_values.begin(), output_values.end()}
);
}
gdf_size_type num_groups = groupby.num_groups();
// An "aggregation request" is the combination of a `gdf_column*` to a column
// of values, and an aggregation operation enum indicating the aggregation
// requested to be performed on the column
std::vector<AggRequestType> original_requests(input_values.num_columns());
std::transform(input_values.begin(), input_values.end(), input_ops.begin(),
original_requests.begin(),
[](gdf_column const* col, operators op) {
return std::make_pair(const_cast<gdf_column*>(col), op);
});
  // Some aggregations are "compound", meaning they need to be satisfied via the
// composition of 1 or more "simple" aggregation requests. For example, MEAN
// is satisfied via the division of the SUM by the COUNT aggregation. We
// translate these compound requests into simple requests, and compute the
// groupby operation for these simple requests. Later, we translate the simple
// requests back to compound request results.
std::vector<SimpleAggRequestCounter> simple_requests =
compound_to_simple(original_requests);
std::vector<gdf_column*> simple_values_columns;
std::vector<operators> simple_operators;
for (auto const& p : simple_requests) {
const AggRequestType& agg_req_type = p.first;
simple_values_columns.push_back(
const_cast<gdf_column*>(agg_req_type.first));
simple_operators.push_back(agg_req_type.second);
}
// If there are "simple" aggregation requests, compute the aggregations
cudf::table current_output_values{};
if (simple_values_columns.size() > 0) {
// Step 1: Aggregate all rows for simple requests
cudf::table simple_output_values = compute_simple_aggregations<keys_have_nulls, values_have_nulls>(input_keys,
options,
groupby,
simple_values_columns,
simple_operators,
stream);
// Step 2: If any of the original requests were compound, compute them from the
// results of simple aggregation requests
current_output_values = compute_original_requests(original_requests, simple_requests, simple_output_values, stream);
}
// If there are "ordered" aggregation requests like MEDIAN, QUANTILE, compute these aggregations
std::vector<gdf_column*> final_output_values = compute_ordered_aggregations(groupby, original_requests, input_ops_args, current_output_values, stream);
// Update size and null count of output columns
std::transform(final_output_values.begin(), final_output_values.end(), final_output_values.begin(),
[num_groups](gdf_column *col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
set_null_count(*col);
return col;
});
return std::make_pair(groupby.unique_keys(), final_output_values);
}
/**---------------------------------------------------------------------------*
* @brief Returns appropriate callable instantiation of `compute_sort_groupby`
* based on presence of null values in keys and values.
*
* @param keys The groupby key columns
* @param values The groupby value columns
* @return Instantiated callable of compute_sort_groupby
*---------------------------------------------------------------------------**/
auto groupby_null_specialization(table const& keys, table const& values) {
if (cudf::has_nulls(keys)) {
if (cudf::has_nulls(values)) {
return compute_sort_groupby<true, true>;
} else {
return compute_sort_groupby<true, false>;
}
} else {
if (cudf::has_nulls(values)) {
return compute_sort_groupby<false, true>;
} else {
return compute_sort_groupby<false, false>;
}
}
}
} // anonymous namespace
namespace detail {
/**---------------------------------------------------------------------------*
* @brief Verifies the requested aggregation is valid for the arguments of the
* operator.
*
* @throw cudf::logic_error if an invalid combination of argument and operator
* is requested.
*
* @param ops The aggregation operators
 * @param args The aggregation arguments
*---------------------------------------------------------------------------**/
static void verify_operators_with_arguments(std::vector<operators> const& ops, std::vector<operation_args*> const& args) {
CUDF_EXPECTS(ops.size() == args.size(),
"Size mismatch between ops and args");
for (size_t i = 0; i < ops.size(); i++) {
if(ops[i] == QUANTILE) {
quantile_args* q_args = static_cast<quantile_args*>(args[i]);
if (q_args == nullptr or q_args->quantiles.size() == 0) {
CUDF_FAIL(
"Missing quantile aggregation arguments.");
}
}
}
}
std::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operation> const& ops,
Options options,
cudaStream_t stream = 0) {
CUDF_EXPECTS(keys.num_rows() == values.num_rows(),
"Size mismatch between number of rows in keys and values.");
std::vector<operators> optype_list(ops.size());
std::transform(ops.begin(), ops.end(), optype_list.begin(), [](auto const& op) {
return op.op_name;
});
std::vector<operation_args*> ops_args(ops.size());
std::transform(ops.begin(), ops.end(), ops_args.begin(), [](auto const& op) {
return op.args.get();
});
verify_operators(values, optype_list);
verify_operators_with_arguments(optype_list, ops_args);
// Empty inputs
if (keys.num_rows() == 0) {
cudf::table output_values(0, target_dtypes(column_dtypes(values), optype_list), column_dtype_infos(values));
return std::make_pair(
cudf::empty_like(keys),
std::vector<gdf_column*>{output_values.begin(), output_values.end()}
);
}
auto compute_groupby = groupby_null_specialization(keys, values);
cudf::table output_keys;
std::vector<gdf_column*> output_values;
std::tie(output_keys, output_values) =
compute_groupby(keys, values, optype_list, ops_args, options, stream);
cudf::table table_output_values(output_values);
update_nvcategories(keys, output_keys, values, table_output_values);
return std::make_pair(output_keys, output_values);
}
} // namespace detail
std::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const &keys,
cudf::table const &values,
std::vector<operation> const &ops,
Options options) {
return detail::groupby(keys, values, ops, options);
}
} // END: namespace sort
} // END: namespace groupby
} // END: namespace cudf
|
e278988c4256025ca4d9f4454e4a4e93ce61652c.hip | // !!! This is a file automatically generated by hipify!!!
// I am including this file solely to show that I did work in CUDA too.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <helper_cuda.h>
#include "roctracer/roctx.h"
#include "nvToolsExtCuda.h"
#include "nvToolsExtCudaRt.h"
#include <dlfcn.h>
#include <cxxabi.h>
#define PRINTING false
#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y
#define bdx blockDim.x
#define bdy blockDim.y
#define gdx gridDim.x
#define gdy gridDim.y
#define iceil(n,d) ((((n)-1)/(d))+1)
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError();\
if (__err != hipSuccess) { \
if (PRINTING) printf("Fatal error %s (%s at %s:%d)\n", msg, hipGetErrorString(__err), __FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#define idx(particle, dimension) ((dimension)*N + (particle))
// so all of the "x" dimension values are contiguous
#define sIdx(column, order) ((order)*(N/2) + (column))
// please not that here consecutive values {0,1,2} of column correspond to particles {0,2,4} or {1,3,5}
// that is to say, column is: (particle>>1)
#define N 6
#define D 2
#define ALPHA 1.0
#define BETA 0.4
#define OMEGA 1.0
//#define K2 (OMEGA * ALPHA)
//#define K (sqrt(K2))
#define CHARGE 6
#define STEP_LENGTH 0.001
#define h 0.00001
#define CYCLES 10000
//#define SEED 124
#define SEED 1234
/*
#define position 0
#define magnitude (position + N*D)
#define slaterUp (magnitude + N*D)
#define slaterDown (slaterUp + N*N/4)
#define slaterUpDet (slaterDown + N*N/4)
#define slaterDownDet (slaterUpDet + 1)
#define jastrow (slaterDownDet + 1)
#define wavefunction (jastrow + 1)
#define force (wavefunction + 1)
#define stateSize (force + N*D)
#define stateA 0
#define stateB (stateA + stateSize)
#define sizeOfSM2 (stateB + stateSize)
*/
#define oldPosition 0
#define newPosition (oldPosition + N*D)
#define oldMagnitude (newPosition + N*D)
#define newMagnitude (oldMagnitude + N)
#define oldSlaterUp (newMagnitude + N)
#define newSlaterUp (oldSlaterUp + N*N/4)
#define oldSlaterDown (newSlaterUp + N*N/4)
#define newSlaterDown (oldSlaterDown + N*N/4)
#define oldDoubleGradientSlater (newSlaterDown + N*N/4)
#define oldLaplacianSlater (oldDoubleGradientSlater + D)
#define oldSlaterUpDeterminant (oldLaplacianSlater + 1)
#define oldSlaterDownDeterminant (oldSlaterUpDeterminant + 1)
#define oldJastrow (oldSlaterDownDeterminant + 1)
#define oldGradientJastrow (oldJastrow + 1)
#define oldLaplacianJastrow (oldGradientJastrow + D)
#define oldForce (oldLaplacianJastrow + 1)
#define newDoubleGradientSlater (oldForce + D)
#define newLaplacianSlater (newDoubleGradientSlater + D)
#define newSlaterUpDeterminant (newLaplacianSlater + 1)
#define newSlaterDownDeterminant (newSlaterUpDeterminant + 1)
#define newJastrow (newSlaterDownDeterminant + 1)
#define newGradientJastrow (newJastrow + 1)
#define newLaplacianJastrow (newGradientJastrow + D)
#define newForce (newLaplacianJastrow + 1)
#define kineticEnergies (newForce + D)
#define sizeOfSM (kineticEnergies + N)
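// The defines above are element offsets (in doubles) into the dynamically sized shared-memory
// array S[] declared below. Per block it holds the old and proposed electron positions and
// magnitudes, the spin-up/spin-down Slater matrices and their determinants, the gradient and
// Laplacian terms of the Slater and Jastrow parts, the drift forces, and the per-electron
// kinetic energies, for a total of sizeOfSM doubles.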
#define STEP_LENGTH2 (STEP_LENGTH * STEP_LENGTH)
// the below is for our finite difference
#define oneOverH (1.0 / h)
#define oneOverH2 (oneOverH*oneOverH)
// the "diffusion constant"
#define bigD 0.5
extern __shared__ double S[]; // I swear to god...
// http://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
__device__ __forceinline__ double hermite (int order, double x) {
switch (order) {
case 0:
return 1.0;
//break; get a compiler warning for including breaks after a return! (i mean, of course you do, but c'mon...)
case 1:
return x+x; // 2x
//break;
/* case 2:
return 4*x*x - 2; // 4x^2 - 2
break;
case 3:
return 8*x*x*x - 12*x; // 8x^3 - 12x
break;
case 4: {
double x2 = x*x;
return 16*x2*x2 - 48*x2 + 12; // 16x^4 - 48x^2 + 12
} break;
*/ default:
if (PRINTING) printf("ERROR: hermite order %d (x=%f)\n", order, x);
}
return -1.0;
}
// Test passed!
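// determinant_3x3 expands the 3x3 matrix stored row-major at S[offset..offset+8] along its first
// row: threads 0-2 each compute one cofactor term, the terms are combined with warp shuffles
// (added for columns 0 and 2, subtracted for column 1), and the result is broadcast to the warp.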
__device__ double determinant_3x3 (int offset) {
double partial = 0.0;
if (tx == 0) {
partial = S[offset + 0] * (S[offset + 4]*S[offset + 8] - S[offset + 5]*S[offset + 7]);
}
if (tx == 1) {
partial = S[offset + 1] * (S[offset + 3]*S[offset + 8] - S[offset + 5]*S[offset + 6]);
}
if (tx == 2) {
partial = S[offset + 2] * (S[offset + 3]*S[offset + 7] - S[offset + 6]*S[offset + 4]);
}
__syncthreads();
partial += __shfl_down(partial, 2);
partial -= __shfl_down(partial, 1);
// tx=0 should now have the right partial. everything else is garbage.
return __shfl (partial, 0);
}
__device__ double psi(double k, double k2, int orbital, int position, int magnitude) {
int nx,ny;
if (orbital == 1) { // orbitals are 0-indexed
ny = 1;
} else {
ny = 0;
}
if (orbital == 2) {
nx = 1;
} else {
nx = 0;
}
double m = S[magnitude + tx];
return hermite(nx, k*S[position + tx]) * hermite(ny, k*S[position + N + tx]) * exp(-0.5 * k2 * m*m);
}
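// a(i,j) below is the Jastrow cusp coefficient for the electron pair (i,j). With the even=up / odd=down
// spin convention used in this file, 1/3 and 1 look like the standard 2D cusp values for parallel and
// antiparallel spins respectively.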
__device__ double a(int i, int j) {
if ((i&1) == (j&1)) { // same spin
return 1.0/3.0;
} else { // different spin
return 1.0;
}
}
__device__ void initializePosition(hiprandState_t *RNG_state) {
if (tx<N) {
double r = 0;
double temp;
for (int d=0; d<D; ++d) {
temp = hiprand_normal_double(RNG_state);
S[oldPosition + d*N + tx] = temp;
r += temp * temp;
}
S[oldMagnitude + tx] = sqrt(r);
}
}
// Test passed!
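// moveElectron below proposes an importance-sampled (drift-diffusion / Fokker-Planck) step for the one
// electron being moved: x_new = x_old + xi*STEP_LENGTH + bigD*F*STEP_LENGTH2 with xi ~ N(0,1).
// Reading STEP_LENGTH2 as the time step dt, the Gaussian term matches sqrt(2*bigD*dt) = STEP_LENGTH
// because bigD = 0.5. Every other electron is just copied across unchanged.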
__device__ __forceinline__ void moveElectron(int positionBefore, int magnitudeBefore, int positionAfter, int magnitudeAfter, int doubleGradientSlater, int slaterDeterminant, int gradientJastrow, int jastrow, int force, int currentlyMovedElectron, hiprandState_t *RNG_state) {
if (tx==currentlyMovedElectron) { // this should not be serialized. i mean, really... but that is a worry for later
double r = 0.0;
double temp;
for (int d=0; d<D; ++d) {
temp = S[positionBefore + d*N + tx] + hiprand_normal_double(RNG_state) * STEP_LENGTH;
temp += S[force + d] * bigD * STEP_LENGTH2;
// temp += (S[doubleGradientSlater+d]/S[slaterDeterminant] + 2*S[gradientJastrow+d]/S[jastrow]) * bigD * STEP_LENGTH2;
S[positionAfter + d*N + tx] = temp;
r += temp * temp;
}
S[magnitudeAfter + tx] = sqrt(r);
} else { // just to be extra-clear
if (tx<N) {
for (int d=0; d<D; ++d) {
S[positionAfter + d*N + tx] = S[positionBefore + d*N + tx];
}
S[magnitudeAfter + tx] = S[magnitudeBefore + tx];
}
}
}
__device__ __forceinline__ int updateCurrentElectron(int currentlyMovedElectron) {
if (currentlyMovedElectron==N-1) {
return 0;
} else {
return ++currentlyMovedElectron;
}
}
__device__ void initializeSlaterMatrices(int slaterUp, int slaterDown, int position, int magnitude, double k) {
if (tx<N) {
int slater;
if ((tx & 1) == 0) { // that is to say, all EVEN threads; including the 0th thread
slater = slaterUp; // thus EVEN threads are UP threads...
} else { // all ODD threads
slater = slaterDown; // ... and ODD threads are DOWN threads
}
for (int orbital=0; orbital<N/2; orbital++) { // the tx/2 on the next line SHOULD return an (int) anyways...
S[slater + (int)(tx/2) + orbital*N/2] = psi(k, k*k, orbital, position, magnitude); // change orbital to orbital after test for consistency with usage in psi()
}
}
}
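// jastrowFactor below evaluates exp( sum over i != k of a(i,k)*r_ik / (1 + beta*r_ik) ) for
// k = currentlyMovedElectron only, with an optional (dx,dy) displacement of electron k for the
// finite-difference stencil. The pair terms not involving k are skipped -- presumably because they
// cancel in the new/old Metropolis ratio -- but note they would matter for the absolute wavefunction value.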
__device__ double jastrowFactor(int position, int currentlyMovedElectron, double beta, double dx, double dy) {
double jastrowTerm = 0.0;
if (tx<N && tx!=currentlyMovedElectron) {
double relativeDistance, temp;
temp = S[position + 0 + tx] - (S[position + 0 + currentlyMovedElectron] + dx);
relativeDistance = temp * temp;
temp = S[position + N + tx] - (S[position + N + currentlyMovedElectron] + dy);
relativeDistance += temp * temp;
relativeDistance = sqrt(relativeDistance);
jastrowTerm = a(tx,currentlyMovedElectron) * relativeDistance;
jastrowTerm /= (1.0 + beta*relativeDistance); // could we FMA this? is it still IEEE compliant?
}
// we need to reduce the jastrow terms... this is the least worst way to do it (not pretty but efficient; brittle, assumes N=6)
jastrowTerm += __shfl_down(jastrowTerm,1);
jastrowTerm += __shfl_down(jastrowTerm,2);
jastrowTerm += __shfl_down(jastrowTerm,4);
jastrowTerm = __shfl(jastrowTerm, 0, 8);
return exp(jastrowTerm);
}
__device__ void jastrowGradientAndLaplacian(int position, int jastrowIndex, int gradientJastrow, int laplacianJastrow, int currentlyMovedElectron, double beta, bool print) {
double xPlus = jastrowFactor(position, currentlyMovedElectron, beta, h, 0.0);
double xMinus = jastrowFactor(position, currentlyMovedElectron, beta, -h, 0.0);
if (tx==0) {
S[gradientJastrow + 0] = 0.5 * (xPlus - xMinus) * oneOverH;
}
double yPlus = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, h);
double yMinus = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, -h);
if (tx==0) {
S[gradientJastrow + 1] = 0.5 * (yPlus - yMinus) * oneOverH;
}
double middle = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, 0.0);
if (tx==0) {
S[laplacianJastrow] = (xPlus + xMinus + yPlus + yMinus - 4*middle) * oneOverH2;
S[jastrowIndex] = middle;
}
}
/* We need to calc not just the stuff for the gradient but also the laplacian, while we have things set up.
with importance sampling this should be used ~90% of the time, so it would be more wasteful to assume we
probably won't need the laplacian for discrete energy calculation, and have to reconstruct partial results
(like A,B,C). also, doing everything once wins on simplicity.
it is the CALLER'S responsibility to pass column = currentlyMovedElectron>>1 */
__device__ void slaterDeterminantAndGradientAndLaplacian(int slater, int position, int magnitude, int slaterDeterminant, int doubleGradientSlater, int slaterLaplacian, double k, int column, bool print) {
if (tx==0 && print) if (PRINTING) printf(":: :: Inside master slater function. Column = %d\n", column);
double A,B,C;
switch (column) { // yay! no warp divergence!
case 0:
A = S[slater+4]*S[slater+8] - S[slater+7]*S[slater+5]; // 4*8 - 7*5 THESE ARE INDICES (top row of the matrix is 0 1 2, middle row is 3 4 5, bottom row is 6 7 8)
B = S[slater+1]*S[slater+8] - S[slater+7]*S[slater+2]; // 1*8 - 7*2 THESE ARE INDICES
C = S[slater+1]*S[slater+5] - S[slater+2]*S[slater+4]; // 1*5 - 2*4 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
case 1: // note the negatives here reflect the assumption of even parity in the code after the switch statement
A = -S[slater+3]*S[slater+8] + S[slater+6]*S[slater+5]; // 3*8 - 6*5 THESE ARE INDICES (signs pre-flipped for this column)
B = -S[slater+0]*S[slater+8] + S[slater+6]*S[slater+2]; // 0*8 - 6*2 THESE ARE INDICES
C = -S[slater+0]*S[slater+5] + S[slater+2]*S[slater+3]; // 0*5 - 2*3 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
case 2:
A = S[slater+3]*S[slater+7] - S[slater+4]*S[slater+6]; // 3*7 - 4*6 THESE ARE INDICES
B = S[slater+0]*S[slater+7] - S[slater+1]*S[slater+6]; // 0*7 - 1*6 THESE ARE INDICES
C = S[slater+0]*S[slater+4] - S[slater+1]*S[slater+3]; // 0*4 - 1*3 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
default:
if (PRINTING) printf("You should never see this message! Something broke!\n");
break;
}
double x = S[position + column];
double y = S[position + N + column];
if ((tx>>1) == 0) { // threads 0 and 1 handle x
if ((tx&1) == 0) { // thread 0 is minus
x -= h;
} else { // thread 1 is plus
x += h;
}
}
if ((tx>>1) == 1) { // threads 2 and 3 handle y
if ((tx&1) == 0) { // thread 2 is minus
y -= h;
} else { // thread 3 is plus
y += h;
}
}
//if (tx == 4) { // thread 4 handles center of 4 point star
// but there is nothing to do for that case! x and y are already good.
//}
double expTerm = exp(-0.5 * k * k * (x*x + y*y));
double determinant = 0.0;
determinant += A * hermite(0, k*x) * hermite(0, k*y) * expTerm;
determinant -= B * hermite(0, k*x) * hermite(1, k*y) * expTerm; // you can optimize this by writing the hermite polynomials explicitly
determinant += C * hermite(1, k*x) * hermite(0, k*y) * expTerm;
// threads 0-3 now hold the slater determinants for the outer 4 points on a 5 point 2D stencil, and threads 4..7 hold the center (duplicates, but that's okay)
if (tx==4) {
S[slaterDeterminant] = determinant;
if (PRINTING) printf(":: determinant = %15.15f\n", determinant);
}
if (tx < 4) {
// will this still work if we put it in inside the IF below it? Answer: No.
double temp = (__shfl_down(determinant, 1, 8) - determinant) * oneOverH;
if ((tx&1) == 0) { // if we are thread 0 or 2
S[doubleGradientSlater + ((tx&2)>>1)] = temp; // write out twice the x and y components of the gradient of the slater det.
}
}
// now we are going to calculate the laplacian
if (tx < 4) {
determinant += __shfl_down(determinant, 1, 8);
determinant += __shfl_down(determinant, 2, 8); // this line and above computes sum of the 4 outlying points on a QUINCUNX stencil (new favorite word)
}
determinant -= 4*__shfl_down(determinant, 4, 8); // threads 4..7 should be equal
if (tx == 0) {
S[slaterLaplacian] = determinant * oneOverH2;
}
}
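// greensFunction below returns the drift-diffusion Green's function ratio used in the Metropolis-Hastings
// acceptance test for the single moved electron, accumulated over dimensions as
// exp( sum_d 0.5*(F_old + F_new) * ( bigD*STEP_LENGTH2*0.5*(F_old - F_new) - x_new + x_old ) ).
// Sketch of how it enters the acceptance test (illustrative names only, mirroring the kernel body below):
// double q = gf * (detNew*jasNew)*(detNew*jasNew) / ((detOld*jasOld)*(detOld*jasOld));
// if (hiprand_uniform_double(&RNG_state) <= q) { /* accept the move, flip parity */ }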
__device__ double greensFunction(int positionOld, int positionNew, int forceOld, int forceNew, int currentlyMovedElectron) {
double gf = 0.0;
if (tx<D) {
gf = 0.5 * (S[forceOld + tx] + S[forceNew + tx]);
if (PRINTING) printf("tx=%d : gf1 = %f\n",tx,gf);
if (PRINTING) printf("tx=%d : delta = %f\n",tx,S[forceOld + tx] - S[forceNew + tx]);
if (PRINTING) printf("tx=%d : pos = %f\n",tx, - S[positionNew + tx*N + currentlyMovedElectron] + S[positionOld + tx*N + currentlyMovedElectron]);
gf *= (bigD * STEP_LENGTH2 * 0.5 * (S[forceOld + tx] - S[forceNew + tx]) - S[positionNew + tx*N + currentlyMovedElectron] + S[positionOld + tx*N + currentlyMovedElectron]);
if (PRINTING) printf("tx=%d : gf2 = %f\n",tx,gf);
}
gf += __shfl_down(gf, 1);
if (tx==0) {
if (PRINTING) printf("tx=%d : gf3 = %f\n",tx,gf);
gf = exp(gf);
if (PRINTING) printf("tx=%d : gf4 = %f\n",tx,gf);
}
return gf;
}
__device__ double dotProductOfGradients(int a, int b) {
double sum = 0.0;
for (int d=0; d<D; ++d) {
sum += S[a+d] * S[b+d];
}
return sum;
}
__device__ void updateForce(int doubleGradientSlaterDeterminant, int slaterDeterminant, int gradientJastrow, int jastrow, int force) {
if (tx<D) { // breaks terribly due to strange slater determinant bug, so set to 0.
S[force + tx] = 0.0;//S[doubleGradientSlaterDeterminant + tx]/S[slaterDeterminant] + 2*S[gradientJastrow + tx]/S[jastrow];
}
}
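// localKineticEnergy below assembles lap(Psi)/Psi for Psi = SlaterDet * Jastrow via
// lap(Psi)/Psi = lap(D)/D + lap(J)/J + 2*grad(D).grad(J)/(D*J);
// doubleGradientSlater already stores 2*grad(D), which is where the factor of 2 in the cross term comes from.
// The conventional local kinetic energy is -0.5*lap(Psi)/Psi, so the overall sign here may still need checking.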
__device__ double localKineticEnergy(int slaterDeterminant, int doubleGradientSlater, int laplacianSlater, int jastrowIndex, int gradientJastrow, int laplacianJastrow) {
double laplacianPsiOverPsi;
if (tx==0) {
laplacianPsiOverPsi = S[laplacianSlater] / S[slaterDeterminant];
laplacianPsiOverPsi += S[laplacianJastrow] / S[jastrowIndex];
laplacianPsiOverPsi += dotProductOfGradients(gradientJastrow, doubleGradientSlater) / (S[slaterDeterminant] * S[jastrowIndex]);
laplacianPsiOverPsi *= 0.5;
}
return __shfl(laplacianPsiOverPsi, 0);
}
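// note on electronElectronPotentialEnergy below: every thread sums 1/r over all of its partners and the
// warp reduction then adds those per-thread sums, so each (i,j) pair ends up counted twice. If the usual
// sum over i<j is intended, the returned value needs an extra factor of 0.5.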
__device__ double electronElectronPotentialEnergy(int position) {
double energy = 0.0;
if (tx<N) {
double x = S[position + tx];
double y = S[position + N + tx];
#pragma unroll
for (int n=0; n<N; n++) {
if (tx!=n) {
double distance, temp;
temp = S[position + n] - x;
distance = temp * temp;
temp = S[position + N + n] - y;
distance += temp * temp;
distance = sqrt(distance);
energy += 1.0/distance;
}
}
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
__device__ double harmonicPotentialEnergy(int position) {
double energy = 0.0;
double r;
if (tx<N) {
for (int d=0; d<D; d++) {
r = S[position + d*N + tx];
energy += r*r;
}
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy * 0.5 * OMEGA;
}
__device__ double electronProtonPotentialEnergy(int magnitude) {
double energy = 0.0;
if (tx<N) {
energy = -CHARGE / S[magnitude + tx];
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
__device__ void initializeKineticEnergies() {
if (tx<N) {
S[kineticEnergies + tx] = 0.0;
}
}
__device__ double sumKineticEnergies() {
double energy = 0.0;
if (tx<N) {
energy = S[kineticEnergies + tx];
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void VMC_Kernel(double alpha, double beta, double k) {
if (PRINTING) printf("\n");
int currentlyMovedElectron = 0;
int accepted = 0;
bool parityReversal = false;
int UNIQUE_ID = tx; // this must be unique for every thread in every block **for every run** before it is useful! (we are only launching one block per grid now for debugging purposes so it is okay)
hiprandState_t RNG_state;
hiprand_init(SEED, UNIQUE_ID, 0, &RNG_state);
// This function is called only this one time. It sets up the oldPosition and oldMagnitude arrays.
initializePosition(&RNG_state);
initializeKineticEnergies();
__syncthreads();
// This fills in the Up and Down matrices. It requires all threads.
initializeSlaterMatrices(oldSlaterUp, oldSlaterDown, oldPosition, oldMagnitude, k);
__syncthreads();
// This calculates the Up determinant, the Up gradient, and the Up laplacian. It requires all threads. The last argument is what column is being varied.
slaterDeterminantAndGradientAndLaplacian(oldSlaterUp, oldPosition, oldMagnitude, oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, currentlyMovedElectron>>1, false);
// This calculates the Down determinant. I don't think it is actually needed, but I want to see it. It requires the first 4 threads.
S[oldSlaterDownDeterminant] = determinant_3x3(oldSlaterDown);
if (tx==0) {
if (PRINTING) printf("oldDetUp = %15.15f\n", S[oldSlaterUpDeterminant]);
if (PRINTING) printf("oldDetDown = %15.15f\n", S[oldSlaterDownDeterminant]);
if (PRINTING) printf("oldGradSlater_x = %15.15f\n", S[oldDoubleGradientSlater]);
if (PRINTING) printf("oldGradSlater_y = %15.15f\n", S[oldDoubleGradientSlater+1]);
if (PRINTING) printf("oldLaplacianSlater = %15.15f\n", S[oldLaplacianSlater] );
}
__syncthreads();
// This calculates the Jastrow factor, its gradient, and its laplacian. It requires all threads. The last argument controls debugging printing.
jastrowGradientAndLaplacian(oldPosition, oldJastrow, oldGradientJastrow, oldLaplacianJastrow, currentlyMovedElectron, beta, true);
if (tx==0) {
if (PRINTING) printf("oldJastrow = %15.15f\n", S[oldJastrow]);
if (PRINTING) printf("oldGradJastrow_x = %15.15f\n", S[oldGradientJastrow]);
if (PRINTING) printf("oldGradJastrow_y = %15.15f\n", S[oldGradientJastrow+1]);
if (PRINTING) printf("oldLaplacianJastrow = %15.15f\n", S[oldLaplacianJastrow]);
}
__syncthreads();
// This calculates the Force. It requires the first two threads.
updateForce(oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce);
if (tx==0) {
if (PRINTING) printf("oldForce_x = %15.15f\n", S[oldForce]);
if (PRINTING) printf("oldForce_y = %15.15f\n", S[oldForce+1]);
}
__syncthreads();
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (int cycle = 0; cycle < CYCLES; cycle++) {
if (PRINTING) printf("\n");
if (tx==0) {
if (PRINTING) printf("************** CYCLE %d **************\n", cycle);
if (PRINTING) printf("%s\n", parityReversal ? "true" : "false");
}
if (parityReversal == false) {
if ((currentlyMovedElectron&1)==0) {
moveElectron(oldPosition, oldMagnitude, newPosition, newMagnitude, oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce, currentlyMovedElectron, &RNG_state);
} else {
moveElectron(oldPosition, oldMagnitude, newPosition, newMagnitude, oldDoubleGradientSlater, oldSlaterDownDeterminant, oldGradientJastrow, oldJastrow, oldForce, currentlyMovedElectron, &RNG_state);
}
__syncthreads();
initializeSlaterMatrices(newSlaterUp, newSlaterDown, newPosition, newMagnitude, k);
__syncthreads();
if ((currentlyMovedElectron&1)==0) {
if (tx==0) if (PRINTING) printf("Now calling slaterDet function on newPosition: UP.\n");
slaterDeterminantAndGradientAndLaplacian(newSlaterUp, newPosition, newMagnitude, newSlaterUpDeterminant, newDoubleGradientSlater, newLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[newSlaterDownDeterminant] = determinant_3x3(newSlaterDown);
} else {
if (tx==0) if (PRINTING) printf("Now calling slaterDet function on newPosition: DOWN.\n");
slaterDeterminantAndGradientAndLaplacian(newSlaterDown, newPosition, newMagnitude, newSlaterDownDeterminant, newDoubleGradientSlater, newLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[newSlaterUpDeterminant] = determinant_3x3(newSlaterUp);
}
__syncthreads();
jastrowGradientAndLaplacian(newPosition, newJastrow, newGradientJastrow, newLaplacianJastrow, currentlyMovedElectron, beta, false);
__syncthreads();
if (tx==0) printf("currentlyMovedElectron : %d\n", currentlyMovedElectron);
if ((currentlyMovedElectron&1)==0) {
updateForce(newDoubleGradientSlater, newSlaterUpDeterminant, newGradientJastrow, newJastrow, newForce); // this is needed for the green's function
} else {
updateForce(newDoubleGradientSlater, newSlaterDownDeterminant, newGradientJastrow, newJastrow, newForce); // this is needed for the green's function
}
__syncthreads();
double gf = greensFunction(oldPosition, newPosition, oldForce, newForce, currentlyMovedElectron);
if (tx==0) {
double RHS;
double numerator = S[newJastrow];
double denominator = S[oldJastrow];
if ((currentlyMovedElectron&1) == 0) {
numerator *= S[newSlaterUpDeterminant];
denominator *= S[oldSlaterUpDeterminant];
} else {
numerator *= S[newSlaterDownDeterminant];
denominator *= S[oldSlaterDownDeterminant];
}
double ratio2 = numerator*numerator / (denominator*denominator);
//double gf = greensFunction(oldPosition, newPosition, oldForce, newForce, currentlyMovedElectron);
RHS = gf * ratio2;
if (hiprand_uniform_double(&RNG_state) <= RHS) {
parityReversal = !parityReversal;
accepted++;
}
}
} else { // new-old PARITY IS REVERSED BELOW : EXPECT EVERYTHING TO BE ALL CATTYWAMPUS
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if ((currentlyMovedElectron&1)==0) {
moveElectron(newPosition, newMagnitude, oldPosition, oldMagnitude, newDoubleGradientSlater, newSlaterUpDeterminant, newGradientJastrow, newJastrow, newForce, currentlyMovedElectron, &RNG_state);
} else {
moveElectron(newPosition, newMagnitude, oldPosition, oldMagnitude, newDoubleGradientSlater, newSlaterDownDeterminant, newGradientJastrow, newJastrow, newForce, currentlyMovedElectron, &RNG_state);
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
initializeSlaterMatrices(oldSlaterUp, oldSlaterDown, oldPosition, oldMagnitude, k);
__syncthreads();
if ((currentlyMovedElectron&1)==0) {
slaterDeterminantAndGradientAndLaplacian(oldSlaterUp, oldPosition, oldMagnitude, oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[oldSlaterDownDeterminant] = determinant_3x3(oldSlaterDown);
} else {
slaterDeterminantAndGradientAndLaplacian(oldSlaterDown, oldPosition, oldMagnitude, oldSlaterDownDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[oldSlaterUpDeterminant] = determinant_3x3(oldSlaterUp);
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
jastrowGradientAndLaplacian(oldPosition, oldJastrow, oldGradientJastrow, oldLaplacianJastrow, currentlyMovedElectron, beta, false);
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if (tx==0) printf("currentlyMovedElectron : %d\n", currentlyMovedElectron);
if ((currentlyMovedElectron&1)==0) {
updateForce(oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce); // this is needed for the green's function
} else {
updateForce(oldDoubleGradientSlater, oldSlaterDownDeterminant, oldGradientJastrow, oldJastrow, oldForce); // this is needed for the green's function
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
double gf = greensFunction(newPosition, oldPosition, newForce, oldForce, currentlyMovedElectron);
if (tx==0) {
double RHS;
double numerator = S[oldJastrow];
double denominator = S[newJastrow];
if ((currentlyMovedElectron&1) == 0) {
numerator *= S[oldSlaterUpDeterminant];
denominator *= S[newSlaterUpDeterminant];
} else {
numerator *= S[oldSlaterDownDeterminant];
denominator *= S[newSlaterDownDeterminant];
}
double ratio2 = numerator*numerator / (denominator*denominator);
if (PRINTING) printf("ratio2 = %f\n", ratio2);
if (PRINTING) printf("GF = %15.15f\n", gf);
RHS = gf * ratio2;
if (PRINTING) printf("RHS = %15.15f\n", RHS);
if (hiprand_uniform_double(&RNG_state) <= RHS) {
if (PRINTING) printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! MOVE ACCEPTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
parityReversal = !parityReversal;
accepted++;
}
}
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
} // END PARITY REVERSAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
parityReversal = __shfl(parityReversal, 0, 8);
if ((cycle+1) % (CYCLES/100) == 0) { // YOU NEED TO ACTUALLY DO THIS EVERY CYCLE TO UPDATE THE KINETIC ENERGY PROPERLY
if (tx==0) printf(" cycle=%d : ", cycle+1);
if (parityReversal == false) {
double kinetic;
// the entire memoization strategy here is foolish... blatantly wrong to assume kinetic energy for an unmoved particle is constant.
// but more important things are broken; fixing this will matter once they are fixed
S[kineticEnergies + currentlyMovedElectron] = localKineticEnergy(oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, oldJastrow, oldGradientJastrow, oldLaplacianJastrow);
kinetic = sumKineticEnergies();
if (tx==0) printf("k=%5.5f : ", kinetic);
double electronElectron;
electronElectron = electronElectronPotentialEnergy(oldPosition);
if (tx==0) printf("e=%5.5f : ", electronElectron);
double harmonicPotential;
harmonicPotential = harmonicPotentialEnergy(oldPosition);
if (tx==0) printf("p=%5.5f : ", harmonicPotential);
if (tx==0) printf("E=%f\n", electronElectron+harmonicPotential+kinetic);
} else {
double kinetic;
// the entire memoization strategy here is foolish... blatantly wrong to assume kinetic energy for an unmoved particle is constant.
// but more important things are broken; fixing this will matter once they are fixed
S[kineticEnergies + currentlyMovedElectron] = localKineticEnergy(newSlaterUpDeterminant, newDoubleGradientSlater, newLaplacianSlater, newJastrow, newGradientJastrow, newLaplacianJastrow);
kinetic = sumKineticEnergies();
if (tx==0) printf("k=%5.5f : ", kinetic);
double electronElectron;
electronElectron = electronElectronPotentialEnergy(newPosition);
if (tx==0) printf("e=%5.5f : ", electronElectron);
double harmonicPotential;
harmonicPotential = harmonicPotentialEnergy(newPosition);
if (tx==0) printf("p=%5.5f : ", harmonicPotential);
if (tx==0) printf("E=%f\n", electronElectron+harmonicPotential+kinetic);
}
}
currentlyMovedElectron = updateCurrentElectron(currentlyMovedElectron); // keep this at the end of the iteration
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (PRINTING) printf("\n");
if (tx==0) {
if (PRINTING) printf("oldSlaterDetUp = %15.15f\n", S[oldSlaterUpDeterminant]);
if (PRINTING) printf("oldSlaterDetDown = %15.15f\n", S[oldSlaterDownDeterminant]);
if (PRINTING) printf("oldGradSlater_x = %15.15f\n", S[oldDoubleGradientSlater] );
if (PRINTING) printf("oldGradSlater_y = %15.15f\n", S[oldDoubleGradientSlater+1]);
if (PRINTING) printf("oldLaplacianSlater = %15.15f\n", S[oldLaplacianSlater]);
if (PRINTING) printf("oldJastrow = %15.15f\n", S[oldJastrow]);
if (PRINTING) printf("oldGradJastrow_x = %15.15f\n", S[oldGradientJastrow]);
if (PRINTING) printf("oldGradJastrow_y = %15.15f\n", S[oldGradientJastrow+1]);
if (PRINTING) printf("oldLaplacianJastrow = %15.15f\n", S[oldLaplacianJastrow]);
if (PRINTING) printf("oldForce_x = %f\n", S[oldForce]);
if (PRINTING) printf("oldForce_y = %f\n", S[oldForce+1]);
if (PRINTING) printf("\n");
if (PRINTING) printf("newSlaterDetUp = %15.15f\n", S[newSlaterUpDeterminant]);
if (PRINTING) printf("newSlaterDetDown = %15.15f\n", S[newSlaterDownDeterminant]);
if (PRINTING) printf("newGradSlater_x = %15.15f\n", S[newDoubleGradientSlater] );
if (PRINTING) printf("newGradSlater_y = %15.15f\n", S[newDoubleGradientSlater+1]);
if (PRINTING) printf("newLaplacianSlater = %15.15f\n", S[newLaplacianSlater]);
if (PRINTING) printf("newJastrow = %15.15f\n", S[newJastrow]);
if (PRINTING) printf("newGradJastrow_x = %15.15f\n", S[newGradientJastrow]);
if (PRINTING) printf("newGradJastrow_y = %15.15f\n", S[newGradientJastrow+1]);
if (PRINTING) printf("newLaplacianJastrow = %15.15f\n", S[newLaplacianJastrow]);
if (PRINTING) printf("newForce_x = %f\n", S[newForce]);
if (PRINTING) printf("newForce_y = %f\n", S[newForce+1]);
if (PRINTING) printf("\n");
}
if (tx<N) {
if (PRINTING) printf("dist[%d]=%f ", tx, S[oldMagnitude+tx]);
}
if (tx==0) printf("\naccepted = %2.2f%%\n", 100.0*accepted/CYCLES);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
__device__ __forceinline__ double hermite (int order, double x) {
switch (order) {
case 0:
return 1.0;
case 1:
return x+x;
}
printf("ERROR: hermite order %d (x=%f)\n", order, x);
return -1.0;
}
__device__ __forceinline__ bool sameSpin(int i, int j) {
return ((i&1) == (j&1));
//so the function will collapse...
//stupid text editor
}
__device__ __forceinline__ double a(int i, int j) {
if (sameSpin(i,j)) { // same spin
return 1.0/3.0;
} else { // different spin
return 1.0;
}
}
__device__ __forceinline__ bool isUpSpin(int i) {
return ((tx & 1) == 0);
// stupid text editor
}
__device__ __forceinline__ double psi(int state, int orbital) {
int nx,ny;
if (orbital == 1) { // orbitals are 0-indexed
ny = 1;
} else {
ny = 0;
}
if (orbital == 2) {
nx = 1;
} else {
nx = 0;
}
double m = S[state + magnitude + tx];
return hermite(nx, K * S[state + position + idx(tx,0)]) * hermite(ny, K * S[state + position + idx(tx,1)]) * exp(-0.5 * K2 * m*m);
}
__device__ double determinant_3x3 (int offset) {
double partial = 0.0;
if (tx == 0) {
partial = S[offset + 0] * (S[offset + 4]*S[offset + 8] - S[offset + 5]*S[offset + 7]);
}
if (tx == 1) {
partial = S[offset + 1] * (S[offset + 3]*S[offset + 8] - S[offset + 5]*S[offset + 6]);
}
if (tx == 2) {
partial = S[offset + 2] * (S[offset + 3]*S[offset + 7] - S[offset + 6]*S[offset + 4]);
}
__syncthreads();
partial += __shfl_down(partial, 2);
partial -= __shfl_down(partial, 1);
// tx=0 should now have the right partial. everything else is garbage.
return __shfl(partial, 0);
}
__device__ void initializePosition(hiprandState_t *RNG_state, int state) {
if (tx<N) {
double r = 1.0 + hiprand_normal_double(RNG_state);
double theta = hiprand_uniform_double(RNG_state) * 2 * PI;
S[state + position + idx(tx,0)] = r*cos(theta);
S[state + position + idx(tx,1)] = r*sin(theta);
S[state + magnitude + tx] = abs(r);
}
}
__device__ void setSlaterMatrices(int state) {
if (tx<N) {
int mySlater;
if (isUpSpin(tx)) { // that is to say, all EVEN threads; including the 0th thread
mySlater = slaterUp; // thus EVEN threads are UP threads...
} else { // all ODD threads
mySlater = slaterDown; // ... and ODD threads are DOWN threads
}
for (int orbital=0; orbital<N/2; orbital++) {
S[state + mySlater + sIdx()] = psi(state, orbital);
}
}
}
__device__ void setSlaterDeterminants(int state) {
S[state + slaterUpDet] = determinant_3x3(state + slaterUp);
S[state + slaterDownDet] = determinant_3x3(state + slaterDown);
}
__device__ bool MC_Trial(int oldState, int newState) {
return accept;
}
__global__ void VMC_Kernel2(double alpha, double beta) {
int accepted = 0;
bool parityReversal = false;
int UNIQUE_ID = tx; // this must be unique for every thread in every block **for every run** before it is useful!
hiprandState_t RNG_state;
hiprand_init(SEED, UNIQUE_ID, 0, &RNG_state);
initializePosition(&RNG_state, stateA);
setSlaterMatrices(stateA);
setWaveFunction(stateA, particle, dimension, step);
//setQuantumForce(stateA);
for (int cycle = 0; cycle < CYCLES; cycle++) {
if (parityReversal) {
if (MC_Trial(stateB, stateA)) parityReversal = !parityReversal;
} else {
if (MC_Trial(stateA, stateB)) parityReversal = !parityReversal;
}
if (parityReversal) {
recordEnergy(stateB);
} else {
recordEnergy(stateA);
}
}
}
*/
void MC_Sample_Variations (int dimensions) {
// dim3 bpg(16*8);
// dim3 tpb(32);
// Test_Kernel <<< bpg, tpb >>> ();
dim3 threadsPerBlock(8,1);
hipDeviceSynchronize();
cudaCheckErrors("trying to sync before kernel launch");
printf("sizeOfSM: %d\n", sizeOfSM);
hipLaunchKernelGGL(( VMC_Kernel) , dim3(1), dim3(threadsPerBlock), sizeOfSM*sizeof(double), 0, ALPHA, BETA, sqrt(ALPHA*OMEGA));
// VMC_Kernel2 <<<1, threadsPerBlock, sizeOfSM2*sizeof(double)>>> (ALPHA, BETA);
cudaCheckErrors("launching kernel");
hipDeviceSynchronize();
cudaCheckErrors("trying to sync after kernel");
return;
}
| e278988c4256025ca4d9f4454e4a4e93ce61652c.cu | // I am including this file solely to show that I did work in CUDA too.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <helper_cuda.h>
#include "nvToolsExt.h"
#include "nvToolsExtCuda.h"
#include "nvToolsExtCudaRt.h"
#include <dlfcn.h>
#include <cxxabi.h>
#define PRINTING false
#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y
#define bdx blockDim.x
#define bdy blockDim.y
#define gdx gridDim.x
#define gdy gridDim.y
#define iceil(n,d) ((n-1)/d)+1
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError();\
if (__err != cudaSuccess) { \
if (PRINTING) printf("Fatal error %s (%s at %s:%d)\n", msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#define idx(particle, dimension) (dimension*N + particle)
// so all of the "x" dimension values are contiguous
#define sIdx(column, order) (order*(N/2) + column)
// please note that here consecutive values {0,1,2} of column correspond to particles {0,2,4} or {1,3,5}
// that is to say, column is: (particle>>1)
#define N 6
#define D 2
#define ALPHA 1.0
#define BETA 0.4
#define OMEGA 1.0
//#define K2 (OMEGA * ALPHA)
//#define K (sqrt(K2))
#define CHARGE 6
#define STEP_LENGTH 0.001
#define h 0.00001
#define CYCLES 10000
//#define SEED 124
#define SEED 1234
/*
#define position 0
#define magnitude (position + N*D)
#define slaterUp (magnitude + N*D)
#define slaterDown (slaterUp + N*N/4)
#define slaterUpDet (slaterDown + N*N/4)
#define slaterDownDet (slaterUpDet + 1)
#define jastrow (slaterDownDet + 1)
#define wavefunction (jastrow + 1)
#define force (wavefunction + 1)
#define stateSize (force + N*D)
#define stateA 0
#define stateB (stateA + stateSize)
#define sizeOfSM2 (stateB + stateSize)
*/
#define oldPosition 0
#define newPosition (oldPosition + N*D)
#define oldMagnitude (newPosition + N*D)
#define newMagnitude (oldMagnitude + N)
#define oldSlaterUp (newMagnitude + N)
#define newSlaterUp (oldSlaterUp + N*N/4)
#define oldSlaterDown (newSlaterUp + N*N/4)
#define newSlaterDown (oldSlaterDown + N*N/4)
#define oldDoubleGradientSlater (newSlaterDown + N*N/4)
#define oldLaplacianSlater (oldDoubleGradientSlater + D)
#define oldSlaterUpDeterminant (oldLaplacianSlater + 1)
#define oldSlaterDownDeterminant (oldSlaterUpDeterminant + 1)
#define oldJastrow (oldSlaterDownDeterminant + 1)
#define oldGradientJastrow (oldJastrow + 1)
#define oldLaplacianJastrow (oldGradientJastrow + D)
#define oldForce (oldLaplacianJastrow + 1)
#define newDoubleGradientSlater (oldForce + D)
#define newLaplacianSlater (newDoubleGradientSlater + D)
#define newSlaterUpDeterminant (newLaplacianSlater + 1)
#define newSlaterDownDeterminant (newSlaterUpDeterminant + 1)
#define newJastrow (newSlaterDownDeterminant + 1)
#define newGradientJastrow (newJastrow + 1)
#define newLaplacianJastrow (newGradientJastrow + D)
#define newForce (newLaplacianJastrow + 1)
#define kineticEnergies (newForce + D)
#define sizeOfSM (kineticEnergies + N)
#define STEP_LENGTH2 (STEP_LENGTH * STEP_LENGTH)
// the below is for our finite difference
#define oneOverH (1.0 / h)
#define oneOverH2 (oneOverH*oneOverH)
// the "diffusion constant"
#define bigD 0.5
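// Derivatives below are taken numerically with central differences on the step h defined above.
// As used in jastrowGradientAndLaplacian and slaterDeterminantAndGradientAndLaplacian:
// df/dx ~ (f(x+h) - f(x-h)) / (2h) -> 0.5 * (plus - minus) * oneOverH
// laplacian (2D, 5-point stencil) ~ (f(x+h,y) + f(x-h,y) + f(x,y+h) + f(x,y-h) - 4*f(x,y)) / h^2 -> (...) * oneOverH2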
extern __shared__ double S[]; // I swear to god...
// http://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
__device__ __forceinline__ double hermite (int order, double x) {
switch (order) {
case 0:
return 1.0;
//break; get a compiler warning for including breaks after a return! (i mean, of course you do, but c'mon...)
case 1:
return x+x; // 2x
//break;
/* case 2:
return 4*x*x - 2; // 4x^2 - 2
break;
case 3:
return 8*x*x*x - 12*x; // 8x^3 - 12x
break;
case 4: {
double x2 = x*x;
return 16*x2*x2 - 48*x2 + 12; // 16x^4 - 48x^2 + 12
} break;
*/ default:
if (PRINTING) printf("ERROR: hermite order %d (x=%f)\n", order, x);
}
return -1.0;
}
// Test passed!
__device__ double determinant_3x3 (int offset) {
double partial = 0.0;
if (tx == 0) {
partial = S[offset + 0] * (S[offset + 4]*S[offset + 8] - S[offset + 5]*S[offset + 7]);
}
if (tx == 1) {
partial = S[offset + 1] * (S[offset + 3]*S[offset + 8] - S[offset + 5]*S[offset + 6]);
}
if (tx == 2) {
partial = S[offset + 2] * (S[offset + 3]*S[offset + 7] - S[offset + 6]*S[offset + 4]);
}
__syncthreads();
partial += __shfl_down(partial, 2);
partial -= __shfl_down(partial, 1);
// tx=0 should now have the right partial. everything else is garbage.
return __shfl (partial, 0);
}
__device__ double psi(double k, double k2, int orbital, int position, int magnitude) {
int nx,ny;
if (orbital == 1) { // orbitals are 0-indexed
ny = 1;
} else {
ny = 0;
}
if (orbital == 2) {
nx = 1;
} else {
nx = 0;
}
double m = S[magnitude + tx];
return hermite(nx, k*S[position + tx]) * hermite(ny, k*S[position + N + tx]) * exp(-0.5 * k2 * m*m);
}
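// a(i,j) below is the Jastrow cusp coefficient for the electron pair (i,j). With the even=up / odd=down
// spin convention used in this file, 1/3 and 1 look like the standard 2D cusp values for parallel and
// antiparallel spins respectively.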
__device__ double a(int i, int j) {
if ((i&1) == (j&1)) { // same spin
return 1.0/3.0;
} else { // different spin
return 1.0;
}
}
__device__ void initializePosition(curandState *RNG_state) {
if (tx<N) {
double r = 0;
double temp;
for (int d=0; d<D; ++d) {
temp = curand_normal_double(RNG_state);
S[oldPosition + d*N + tx] = temp;
r += temp * temp;
}
S[oldMagnitude + tx] = sqrt(r);
}
}
// Test passed!
__device__ __forceinline__ void moveElectron(int positionBefore, int magnitudeBefore, int positionAfter, int magnitudeAfter, int doubleGradientSlater, int slaterDeterminant, int gradientJastrow, int jastrow, int force, int currentlyMovedElectron, curandState *RNG_state) {
if (tx==currentlyMovedElectron) { // this should not be serialized. i mean, really... but that is a worry for later
double r = 0.0;
double temp;
for (int d=0; d<D; ++d) {
temp = S[positionBefore + d*N + tx] + curand_normal_double(RNG_state) * STEP_LENGTH;
temp += S[force + d] * bigD * STEP_LENGTH2;
// temp += (S[doubleGradientSlater+d]/S[slaterDeterminant] + 2*S[gradientJastrow+d]/S[jastrow]) * bigD * STEP_LENGTH2;
S[positionAfter + d*N + tx] = temp;
r += temp * temp;
}
S[magnitudeAfter + tx] = sqrt(r);
} else { // just to be extra-clear
if (tx<N) {
for (int d=0; d<D; ++d) {
S[positionAfter + d*N + tx] = S[positionBefore + d*N + tx];
}
S[magnitudeAfter + tx] = S[magnitudeBefore + tx];
}
}
}
__device__ __forceinline__ int updateCurrentElectron(int currentlyMovedElectron) {
if (currentlyMovedElectron==N-1) {
return 0;
} else {
return ++currentlyMovedElectron;
}
}
__device__ void initializeSlaterMatrices(int slaterUp, int slaterDown, int position, int magnitude, double k) {
if (tx<N) {
int slater;
if ((tx & 1) == 0) { // that is to say, all EVEN threads; including the 0th thread
slater = slaterUp; // thus EVEN threads are UP threads...
} else { // all ODD threads
slater = slaterDown; // ... and ODD threads are DOWN threads
}
for (int orbital=0; orbital<N/2; orbital++) { // the tx/2 on the next line SHOULD return an (int) anyways...
S[slater + (int)(tx/2) + orbital*N/2] = psi(k, k*k, orbital, position, magnitude); // change orbital to orbital after test for consistency with usage in psi()
}
}
}
__device__ double jastrowFactor(int position, int currentlyMovedElectron, double beta, double dx, double dy) {
double jastrowTerm = 0.0;
if (tx<N && tx!=currentlyMovedElectron) {
double relativeDistance, temp;
temp = S[position + 0 + tx] - (S[position + 0 + currentlyMovedElectron] + dx);
relativeDistance = temp * temp;
temp = S[position + N + tx] - (S[position + N + currentlyMovedElectron] + dy);
relativeDistance += temp * temp;
relativeDistance = sqrt(relativeDistance);
jastrowTerm = a(tx,currentlyMovedElectron) * relativeDistance;
jastrowTerm /= (1.0 + beta*relativeDistance); // could we FMA this? is it still IEEE compliant?
}
// we need to reduce the jastrow terms... this is the least worst way to do it (not pretty but efficient; brittle, assumes N=6)
jastrowTerm += __shfl_down(jastrowTerm,1);
jastrowTerm += __shfl_down(jastrowTerm,2);
jastrowTerm += __shfl_down(jastrowTerm,4);
jastrowTerm = __shfl(jastrowTerm, 0, 8);
return exp(jastrowTerm);
}
__device__ void jastrowGradientAndLaplacian(int position, int jastrowIndex, int gradientJastrow, int laplacianJastrow, int currentlyMovedElectron, double beta, bool print) {
double xPlus = jastrowFactor(position, currentlyMovedElectron, beta, h, 0.0);
double xMinus = jastrowFactor(position, currentlyMovedElectron, beta, -h, 0.0);
if (tx==0) {
S[gradientJastrow + 0] = 0.5 * (xPlus - xMinus) * oneOverH;
}
double yPlus = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, h);
double yMinus = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, -h);
if (tx==0) {
S[gradientJastrow + 1] = 0.5 * (yPlus - yMinus) * oneOverH;
}
double middle = jastrowFactor(position, currentlyMovedElectron, beta, 0.0, 0.0);
if (tx==0) {
S[laplacianJastrow] = (xPlus + xMinus + yPlus + yMinus - 4*middle) * oneOverH2;
S[jastrowIndex] = middle;
}
}
/* We need to calc not just the stuff for the gradient but also the laplacian, while we have things set up.
with importance sampling this should be used ~90% of the time, so it would be more wasteful to assume we
probably won't need the laplacian for discrete energy calculation, and have to reconstruct partial results
(like A,B,C). also, doing everything once wins on simplicity.
it is the CALLER'S responsibility to pass column = currentlyMovedElectron>>1 */
__device__ void slaterDeterminantAndGradientAndLaplacian(int slater, int position, int magnitude, int slaterDeterminant, int doubleGradientSlater, int slaterLaplacian, double k, int column, bool print) {
if (tx==0 && print) if (PRINTING) printf(":: :: Inside master slater function. Column = %d\n", column);
double A,B,C;
switch (column) { // yay! no warp divergence!
case 0:
A = S[slater+4]*S[slater+8] - S[slater+7]*S[slater+5]; // 4*8 - 7*5 THESE ARE INDICES (top row of the matrix is 0 1 2, middle row is 3 4 5, bottom row is 6 7 8)
B = S[slater+1]*S[slater+8] - S[slater+7]*S[slater+2]; // 1*8 - 7*2 THESE ARE INDICES
C = S[slater+1]*S[slater+5] - S[slater+2]*S[slater+4]; // 1*5 - 2*4 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
case 1: // note the negatives here reflect the assumption of even parity in the code after the switch statement
A = -S[slater+3]*S[slater+8] + S[slater+6]*S[slater+5]; // 3*8 - 6*5 THESE ARE INDICES (signs pre-flipped for this column)
B = -S[slater+0]*S[slater+8] + S[slater+6]*S[slater+2]; // 0*8 - 6*2 THESE ARE INDICES
C = -S[slater+0]*S[slater+5] + S[slater+2]*S[slater+3]; // 0*5 - 2*3 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
case 2:
A = S[slater+3]*S[slater+7] - S[slater+4]*S[slater+6]; // 3*7 - 4*6 THESE ARE INDICES
B = S[slater+0]*S[slater+7] - S[slater+1]*S[slater+6]; // 0*7 - 1*6 THESE ARE INDICES
C = S[slater+0]*S[slater+4] - S[slater+1]*S[slater+3]; // 0*4 - 1*3 THESE ARE INDICES (each index that appears should appear twice on these 3 lines)
break;
default:
if (PRINTING) printf("You should never see this message! Something broke!\n");
break;
}
double x = S[position + column];
double y = S[position + N + column];
if ((tx>>1) == 0) { // threads 0 and 1 handle x
if ((tx&1) == 0) { // thread 0 is minus
x -= h;
} else { // thread 1 is plus
x += h;
}
}
if ((tx>>1) == 1) { // threads 2 and 3 handle y
if ((tx&1) == 0) { // thread 2 is minus
y -= h;
} else { // thread 3 is plus
y += h;
}
}
//if (tx == 4) { // thread 4 handles center of 4 point star
// but there is nothing to do for that case! x and y are already good.
//}
double expTerm = exp(-0.5 * k * k * (x*x + y*y));
double determinant = 0.0;
determinant += A * hermite(0, k*x) * hermite(0, k*y) * expTerm;
determinant -= B * hermite(0, k*x) * hermite(1, k*y) * expTerm; // you can optimize this by writing the hermite polynomials explicitly
determinant += C * hermite(1, k*x) * hermite(0, k*y) * expTerm;
// threads 0-3 now hold the slater determinants for the outer 4 points on a 5 point 2D stencil, and threads 4..7 hold the center (duplicates, but that's okay)
if (tx==4) {
S[slaterDeterminant] = determinant;
if (PRINTING) printf(":: determinant = %15.15f\n", determinant);
}
if (tx < 4) {
// will this still work if we put it in inside the IF below it? Answer: No.
double temp = (__shfl_down(determinant, 1, 8) - determinant) * oneOverH;
if ((tx&1) == 0) { // if we are thread 0 or 2
S[doubleGradientSlater + ((tx&2)>>1)] = temp; // write out twice the x and y components of the gradient of the slater det.
}
}
// now we are going to calculate the laplacian
if (tx < 4) {
determinant += __shfl_down(determinant, 1, 8);
determinant += __shfl_down(determinant, 2, 8); // this line and above computes sum of the 4 outlying points on a QUINCUNX stencil (new favorite word)
}
determinant -= 4*__shfl_down(determinant, 4, 8); // threads 4..7 should be equal
if (tx == 0) {
S[slaterLaplacian] = determinant * oneOverH2;
}
}
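// greensFunction below returns the drift-diffusion Green's function ratio used in the Metropolis-Hastings
// acceptance test for the single moved electron, accumulated over dimensions as
// exp( sum_d 0.5*(F_old + F_new) * ( bigD*STEP_LENGTH2*0.5*(F_old - F_new) - x_new + x_old ) ).
// Sketch of how it enters the acceptance test (illustrative names only, mirroring the kernel body below):
// double q = gf * (detNew*jasNew)*(detNew*jasNew) / ((detOld*jasOld)*(detOld*jasOld));
// if (curand_uniform_double(&RNG_state) <= q) { /* accept the move, flip parity */ }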
__device__ double greensFunction(int positionOld, int positionNew, int forceOld, int forceNew, int currentlyMovedElectron) {
double gf = 0.0;
if (tx<D) {
gf = 0.5 * (S[forceOld + tx] + S[forceNew + tx]);
if (PRINTING) printf("tx=%d : gf1 = %f\n",tx,gf);
if (PRINTING) printf("tx=%d : delta = %f\n",tx,S[forceOld + tx] - S[forceNew + tx]);
if (PRINTING) printf("tx=%d : pos = %f\n",tx, - S[positionNew + tx*N + currentlyMovedElectron] + S[positionOld + tx*N + currentlyMovedElectron]);
gf *= (bigD * STEP_LENGTH2 * 0.5 * (S[forceOld + tx] - S[forceNew + tx]) - S[positionNew + tx*N + currentlyMovedElectron] + S[positionOld + tx*N + currentlyMovedElectron]);
if (PRINTING) printf("tx=%d : gf2 = %f\n",tx,gf);
}
gf += __shfl_down(gf, 1);
if (tx==0) {
if (PRINTING) printf("tx=%d : gf3 = %f\n",tx,gf);
gf = exp(gf);
if (PRINTING) printf("tx=%d : gf4 = %f\n",tx,gf);
}
return gf;
}
__device__ double dotProductOfGradients(int a, int b) {
double sum = 0.0;
for (int d=0; d<D; ++d) {
sum += S[a+d] * S[b+d];
}
return sum;
}
__device__ void updateForce(int doubleGradientSlaterDeterminant, int slaterDeterminant, int gradientJastrow, int jastrow, int force) {
if (tx<D) { // breaks terribly due to strange slater determinant bug, so set to 0.
S[force + tx] = 0.0;//S[doubleGradientSlaterDeterminant + tx]/S[slaterDeterminant] + 2*S[gradientJastrow + tx]/S[jastrow];
}
}
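// localKineticEnergy below assembles lap(Psi)/Psi for Psi = SlaterDet * Jastrow via
// lap(Psi)/Psi = lap(D)/D + lap(J)/J + 2*grad(D).grad(J)/(D*J);
// doubleGradientSlater already stores 2*grad(D), which is where the factor of 2 in the cross term comes from.
// The conventional local kinetic energy is -0.5*lap(Psi)/Psi, so the overall sign here may still need checking.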
__device__ double localKineticEnergy(int slaterDeterminant, int doubleGradientSlater, int laplacianSlater, int jastrowIndex, int gradientJastrow, int laplacianJastrow) {
double laplacianPsiOverPsi;
if (tx==0) {
laplacianPsiOverPsi = S[laplacianSlater] / S[slaterDeterminant];
laplacianPsiOverPsi += S[laplacianJastrow] / S[jastrowIndex];
laplacianPsiOverPsi += dotProductOfGradients(gradientJastrow, doubleGradientSlater) / (S[slaterDeterminant] * S[jastrowIndex]);
laplacianPsiOverPsi *= 0.5;
}
return __shfl(laplacianPsiOverPsi, 0);
}
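// note on electronElectronPotentialEnergy below: every thread sums 1/r over all of its partners and the
// warp reduction then adds those per-thread sums, so each (i,j) pair ends up counted twice. If the usual
// sum over i<j is intended, the returned value needs an extra factor of 0.5.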
__device__ double electronElectronPotentialEnergy(int position) {
double energy = 0.0;
if (tx<N) {
double x = S[position + tx];
double y = S[position + N + tx];
#pragma unroll
for (int n=0; n<N; n++) {
if (tx!=n) {
double distance, temp;
temp = S[position + n] - x;
distance = temp * temp;
temp = S[position + N + n] - y;
distance += temp * temp;
distance = sqrt(distance);
energy += 1.0/distance;
}
}
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
__device__ double harmonicPotentialEnergy(int position) {
double energy = 0.0;
double r;
if (tx<N) {
for (int d=0; d<D; d++) {
r = S[position + d*N + tx];
energy += r*r;
}
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy * 0.5 * OMEGA;
}
__device__ double electronProtonPotentialEnergy(int magnitude) {
double energy = 0.0;
if (tx<N) {
energy = -CHARGE / S[magnitude + tx];
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
__device__ void initializeKineticEnergies() {
if (tx<N) {
S[kineticEnergies + tx] = 0.0;
}
}
__device__ double sumKineticEnergies() {
double energy = 0.0;
if (tx<N) {
energy = S[kineticEnergies + tx];
}
energy += __shfl_down(energy, 1, 8);
energy += __shfl_down(energy, 2, 8);
energy += __shfl_down(energy, 4, 8);
energy = __shfl(energy, 0, 8);
return energy;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void VMC_Kernel(double alpha, double beta, double k) {
if (PRINTING) printf("\n");
int currentlyMovedElectron = 0;
int accepted = 0;
bool parityReversal = false;
int UNIQUE_ID = tx; // this must be unique for every thread in every block **for every run** before it is useful! (we are only launching one block per grid now for debugging purposes so it is okay)
curandState RNG_state;
curand_init(SEED, UNIQUE_ID, 0, &RNG_state);
// This function is called only this one time. It sets up the oldPosition and oldMagnitude arrays.
initializePosition(&RNG_state);
initializeKineticEnergies();
__syncthreads();
// This fills in the Up and Down matrices. It requires all threads.
initializeSlaterMatrices(oldSlaterUp, oldSlaterDown, oldPosition, oldMagnitude, k);
__syncthreads();
// This calculates the Up determinant, the Up gradient, and the Up laplacian. It requires all threads. The last argument is what column is being varied.
slaterDeterminantAndGradientAndLaplacian(oldSlaterUp, oldPosition, oldMagnitude, oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, currentlyMovedElectron>>1, false);
// This calculates the Down determinant. I don't think it is actually needed, but I want to see it. It requires the first 4 threads.
S[oldSlaterDownDeterminant] = determinant_3x3(oldSlaterDown);
if (tx==0) {
if (PRINTING) printf("oldDetUp = %15.15f\n", S[oldSlaterUpDeterminant]);
if (PRINTING) printf("oldDetDown = %15.15f\n", S[oldSlaterDownDeterminant]);
if (PRINTING) printf("oldGradSlater_x = %15.15f\n", S[oldDoubleGradientSlater]);
if (PRINTING) printf("oldGradSlater_y = %15.15f\n", S[oldDoubleGradientSlater+1]);
if (PRINTING) printf("oldLaplacianSlater = %15.15f\n", S[oldLaplacianSlater] );
}
__syncthreads();
// This calculates the Jastrow factor, its gradient, and its laplacian. It requires all threads. The last argument controls debugging printing.
jastrowGradientAndLaplacian(oldPosition, oldJastrow, oldGradientJastrow, oldLaplacianJastrow, currentlyMovedElectron, beta, true);
if (tx==0) {
if (PRINTING) printf("oldJastrow = %15.15f\n", S[oldJastrow]);
if (PRINTING) printf("oldGradJastrow_x = %15.15f\n", S[oldGradientJastrow]);
if (PRINTING) printf("oldGradJastrow_y = %15.15f\n", S[oldGradientJastrow+1]);
if (PRINTING) printf("oldLaplacianJastrow = %15.15f\n", S[oldLaplacianJastrow]);
}
__syncthreads();
// This calculates the Force. It requires the first two threads.
updateForce(oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce);
if (tx==0) {
if (PRINTING) printf("oldForce_x = %15.15f\n", S[oldForce]);
if (PRINTING) printf("oldForce_y = %15.15f\n", S[oldForce+1]);
}
__syncthreads();
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (int cycle = 0; cycle < CYCLES; cycle++) {
if (PRINTING) printf("\n");
if (tx==0) {
if (PRINTING) printf("************** CYCLE %d **************\n", cycle);
if (PRINTING) printf("%s\n", parityReversal ? "true" : "false");
}
if (parityReversal == false) {
if ((currentlyMovedElectron&1)==0) {
moveElectron(oldPosition, oldMagnitude, newPosition, newMagnitude, oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce, currentlyMovedElectron, &RNG_state);
} else {
moveElectron(oldPosition, oldMagnitude, newPosition, newMagnitude, oldDoubleGradientSlater, oldSlaterDownDeterminant, oldGradientJastrow, oldJastrow, oldForce, currentlyMovedElectron, &RNG_state);
}
__syncthreads();
initializeSlaterMatrices(newSlaterUp, newSlaterDown, newPosition, newMagnitude, k);
__syncthreads();
if ((currentlyMovedElectron&1)==0) {
if (tx==0) if (PRINTING) printf("Now calling slaterDet function on newPosition: UP.\n");
slaterDeterminantAndGradientAndLaplacian(newSlaterUp, newPosition, newMagnitude, newSlaterUpDeterminant, newDoubleGradientSlater, newLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[newSlaterDownDeterminant] = determinant_3x3(newSlaterDown);
} else {
if (tx==0) if (PRINTING) printf("Now calling slaterDet function on newPosition: DOWN.\n");
slaterDeterminantAndGradientAndLaplacian(newSlaterDown, newPosition, newMagnitude, newSlaterDownDeterminant, newDoubleGradientSlater, newLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[newSlaterUpDeterminant] = determinant_3x3(newSlaterUp);
}
__syncthreads();
jastrowGradientAndLaplacian(newPosition, newJastrow, newGradientJastrow, newLaplacianJastrow, currentlyMovedElectron, beta, false);
__syncthreads();
if (tx==0) printf("currentlyMovedElectron : %d\n", currentlyMovedElectron);
if ((currentlyMovedElectron&1)==0) {
updateForce(newDoubleGradientSlater, newSlaterUpDeterminant, newGradientJastrow, newJastrow, newForce); // this is needed for the green's function
} else {
updateForce(newDoubleGradientSlater, newSlaterDownDeterminant, newGradientJastrow, newJastrow, newForce); // this is needed for the green's function
}
__syncthreads();
double gf = greensFunction(oldPosition, newPosition, oldForce, newForce, currentlyMovedElectron);
if (tx==0) {
double RHS;
double numerator = S[newJastrow];
double denominator = S[oldJastrow];
if ((currentlyMovedElectron&1) == 0) {
numerator *= S[newSlaterUpDeterminant];
denominator *= S[oldSlaterUpDeterminant];
} else {
numerator *= S[newSlaterDownDeterminant];
denominator *= S[oldSlaterDownDeterminant];
}
double ratio2 = numerator*numerator / (denominator*denominator);
//double gf = greensFunction(oldPosition, newPosition, oldForce, newForce, currentlyMovedElectron);
RHS = gf * ratio2;
if (curand_uniform_double(&RNG_state) <= RHS) {
parityReversal = !parityReversal;
accepted++;
}
}
} else { // new-old PARITY IS REVERSED BELOW : EXPECT EVERYTHING TO BE ALL CATTYWAMPUS
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if ((currentlyMovedElectron&1)==0) {
moveElectron(newPosition, newMagnitude, oldPosition, oldMagnitude, newDoubleGradientSlater, newSlaterUpDeterminant, newGradientJastrow, newJastrow, newForce, currentlyMovedElectron, &RNG_state);
} else {
moveElectron(newPosition, newMagnitude, oldPosition, oldMagnitude, newDoubleGradientSlater, newSlaterDownDeterminant, newGradientJastrow, newJastrow, newForce, currentlyMovedElectron, &RNG_state);
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
initializeSlaterMatrices(oldSlaterUp, oldSlaterDown, oldPosition, oldMagnitude, k);
__syncthreads();
if ((currentlyMovedElectron&1)==0) {
slaterDeterminantAndGradientAndLaplacian(oldSlaterUp, oldPosition, oldMagnitude, oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[oldSlaterDownDeterminant] = determinant_3x3(oldSlaterDown);
} else {
slaterDeterminantAndGradientAndLaplacian(oldSlaterDown, oldPosition, oldMagnitude, oldSlaterDownDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, k, (currentlyMovedElectron>>1), true);
S[oldSlaterUpDeterminant] = determinant_3x3(oldSlaterUp);
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
jastrowGradientAndLaplacian(oldPosition, oldJastrow, oldGradientJastrow, oldLaplacianJastrow, currentlyMovedElectron, beta, false);
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if (tx==0) printf("currentlyMovedElectron : %d\n", currentlyMovedElectron);
if ((currentlyMovedElectron&1)==0) {
updateForce(oldDoubleGradientSlater, oldSlaterUpDeterminant, oldGradientJastrow, oldJastrow, oldForce); // this is needed for the green's function
} else {
updateForce(oldDoubleGradientSlater, oldSlaterDownDeterminant, oldGradientJastrow, oldJastrow, oldForce); // this is needed for the green's function
}
__syncthreads();
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
double gf = greensFunction(newPosition, oldPosition, newForce, oldForce, currentlyMovedElectron);
if (tx==0) {
double RHS;
double numerator = S[oldJastrow];
double denominator = S[newJastrow];
if ((currentlyMovedElectron&1) == 0) {
numerator *= S[oldSlaterUpDeterminant];
denominator *= S[newSlaterUpDeterminant];
} else {
numerator *= S[oldSlaterDownDeterminant];
denominator *= S[newSlaterDownDeterminant];
}
double ratio2 = numerator*numerator / (denominator*denominator);
if (PRINTING) printf("ratio2 = %f\n", ratio2);
if (PRINTING) printf("GF = %15.15f\n", gf);
RHS = gf * ratio2;
if (PRINTING) printf("RHS = %15.15f\n", RHS);
if (curand_uniform_double(&RNG_state) <= RHS) {
if (PRINTING) printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! MOVE ACCEPTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
parityReversal = !parityReversal;
accepted++;
}
}
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
} // END PARITY REVERSAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
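// broadcast lane 0's accept/reject decision (parityReversal) to all threads in the 8-wide group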
parityReversal = __shfl(parityReversal, 0, 8);
if ((cycle+1) % (CYCLES/100) == 0) { // YOU NEED TO ACTUALLY DO THIS EVERY CYCLE TO UPDATE THE KINETIC ENERGY PROPERLY
if (tx==0) printf(" cycle=%d : ", cycle+1);
if (parityReversal == false) {
double kinetic;
// the entire memoization strategy here is foolish... blatantly wrong to assume kinetic energy for unmoved particle is constant.
// but more important things are broken; fixing this will matter once they are fixed
S[kineticEnergies + currentlyMovedElectron] = localKineticEnergy(oldSlaterUpDeterminant, oldDoubleGradientSlater, oldLaplacianSlater, oldJastrow, oldGradientJastrow, oldLaplacianJastrow);
kinetic = sumKineticEnergies();
if (tx==0) printf("k=%5.5f : ", kinetic);
double electronElectron;
electronElectron = electronElectronPotentialEnergy(oldPosition);
if (tx==0) printf("e=%5.5f : ", electronElectron);
double harmonicPotential;
harmonicPotential = harmonicPotentialEnergy(oldPosition);
if (tx==0) printf("p=%5.5f : ", harmonicPotential);
if (tx==0) printf("E=%f\n", electronElectron+harmonicPotential+kinetic);
} else {
double kinetic;
// the entire memoization strategy here is foolish... blatantly wrong to assume kinetic energy for unmoved particle is constant.
// but more important things are broken; fixing this will matter once they are fixed
S[kineticEnergies + currentlyMovedElectron] = localKineticEnergy(newSlaterUpDeterminant, newDoubleGradientSlater, newLaplacianSlater, newJastrow, newGradientJastrow, newLaplacianJastrow);
kinetic = sumKineticEnergies();
if (tx==0) printf("k=%5.5f : ", kinetic);
double electronElectron;
electronElectron = electronElectronPotentialEnergy(newPosition);
if (tx==0) printf("e=%5.5f : ", electronElectron);
double harmonicPotential;
harmonicPotential = harmonicPotentialEnergy(newPosition);
if (tx==0) printf("p=%5.5f : ", harmonicPotential);
if (tx==0) printf("E=%f\n", electronElectron+harmonicPotential+kinetic);
}
}
currentlyMovedElectron = updateCurrentElectron(currentlyMovedElectron); // keep this at the end of the iteration
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (PRINTING) printf("\n");
if (tx==0) {
if (PRINTING) printf("oldSlaterDetUp = %15.15f\n", S[oldSlaterUpDeterminant]);
if (PRINTING) printf("oldSlaterDetDown = %15.15f\n", S[oldSlaterDownDeterminant]);
if (PRINTING) printf("oldGradSlater_x = %15.15f\n", S[oldDoubleGradientSlater] );
if (PRINTING) printf("oldGradSlater_y = %15.15f\n", S[oldDoubleGradientSlater+1]);
if (PRINTING) printf("oldLaplacianSlater = %15.15f\n", S[oldLaplacianSlater]);
if (PRINTING) printf("oldJastrow = %15.15f\n", S[oldJastrow]);
if (PRINTING) printf("oldGradJastrow_x = %15.15f\n", S[oldGradientJastrow]);
if (PRINTING) printf("oldGradJastrow_y = %15.15f\n", S[oldGradientJastrow+1]);
if (PRINTING) printf("oldLaplacianJastrow = %15.15f\n", S[oldLaplacianJastrow]);
if (PRINTING) printf("oldForce_x = %f\n", S[oldForce]);
if (PRINTING) printf("oldForce_y = %f\n", S[oldForce+1]);
if (PRINTING) printf("\n");
if (PRINTING) printf("newSlaterDetUp = %15.15f\n", S[newSlaterUpDeterminant]);
if (PRINTING) printf("newSlaterDetDown = %15.15f\n", S[newSlaterDownDeterminant]);
if (PRINTING) printf("newGradSlater_x = %15.15f\n", S[newDoubleGradientSlater] );
if (PRINTING) printf("newGradSlater_y = %15.15f\n", S[newDoubleGradientSlater+1]);
if (PRINTING) printf("newLaplacianSlater = %15.15f\n", S[newLaplacianSlater]);
if (PRINTING) printf("newJastrow = %15.15f\n", S[newJastrow]);
if (PRINTING) printf("newGradJastrow_x = %15.15f\n", S[newGradientJastrow]);
if (PRINTING) printf("newGradJastrow_y = %15.15f\n", S[newGradientJastrow+1]);
if (PRINTING) printf("newLaplacianJastrow = %15.15f\n", S[newLaplacianJastrow]);
if (PRINTING) printf("newForce_x = %f\n", S[newForce]);
if (PRINTING) printf("newForce_y = %f\n", S[newForce+1]);
if (PRINTING) printf("\n");
}
if (tx<N) {
if (PRINTING) printf("dist[%d]=%f ", tx, S[oldMagnitude+tx]);
}
if (tx==0) printf("\naccepted = %2.2f%%\n", 100.0*accepted/CYCLES);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
__device__ __forceinline__ double hermite (int order, double x) {
switch (order) {
case 0:
return 1.0;
case 1:
return x+x;
}
printf("ERROR: hermite order %d (x=%f)\n", order, x);
return -1.0;
}
__device__ __forceinline__ bool sameSpin(int i, int j) {
return ((i&1) == (j&1));
//so the function will collapse...
//stupid text editor
}
__device__ __forceinline__ double a(int i, int j) {
if (sameSpin(i,j)) { // same spin
return 1.0/3.0;
} else { // different spin
return 1.0;
}
}
__device__ __forceinline__ bool isUpSpin(int i) {
return ((i & 1) == 0);
// stupid text editor
}
__device__ __forceinline__ double psi(int state, int orbital) {
int nx,ny;
if (orbital == 1) { // orbitals are 0-indexed
ny = 1;
} else {
ny = 0;
}
if (orbital == 2) {
nx = 1;
} else {
nx = 0;
}
double m = S[state + magnitude + tx];
return hermite(nx, K * S[state + position + idx(tx,0)]) * hermite(ny, K * S[state + position + idx(tx,1)]) * exp(-0.5 * K2 * m*m);
}
__device__ double determinant_3x3 (int offset) {
double partial = 0.0;
if (tx == 0) {
partial = S[offset + 0] * (S[offset + 4]*S[offset + 8] - S[offset + 5]*S[offset + 7]);
}
if (tx == 1) {
partial = S[offset + 1] * (S[offset + 3]*S[offset + 8] - S[offset + 5]*S[offset + 6]);
}
if (tx == 2) {
partial = S[offset + 2] * (S[offset + 3]*S[offset + 7] - S[offset + 6]*S[offset + 4]);
}
__syncthreads();
partial += __shfl_down(partial, 2);
partial -= __shfl_down(partial, 1);
// tx=0 should now have the right partial. everything else is garbage.
return __shfl(partial, 0);
}
__device__ void initializePosition(curandState *RNG_state, int state) {
if (tx<N) {
double r = 1.0 + curand_normal_double(RNG_state);
double theta = curand_uniform_double(RNG_state) * 2 * PI;
S[state + position + idx(tx,0)] = r*cos(theta);
S[state + position + idx(tx,1)] = r*sin(theta);
S[state + magnitude + tx] = abs(r);
}
}
__device__ void setSlaterMatrices(int state) {
if (tx<N) {
int mySlater;
if (isUpSpin(tx)) { // that is to say, all EVEN threads; including the 0th thread
mySlater = slaterUp; // thus EVEN threads are UP threads...
} else { // all ODD threads
mySlater = slaterDown; // ... and ODD threads are DOWN threads
}
for (int orbital=0; orbital<N/2; orbital++) {
S[state + mySlater + sIdx()] = psi(state, orbital);
}
}
}
__device__ void setSlaterDeterminants(int state) {
S[state + slaterUpDet] = determinant_3x3(state + slaterUp);
S[state + slaterDownDet] = determinant_3x3(state + slaterDown);
}
__device__ bool MC_Trial(int oldState, int newState) {
return accept;
}
__global__ void VMC_Kernel2(double alpha, double beta) {
int accepted = 0;
bool parityReversal = false;
int UNIQUE_ID = tx; // this must be unique for every thread in every block **for every run** before it is useful!
curandState RNG_state;
curand_init(SEED, UNIQUE_ID, 0, &RNG_state);
initializePosition(&RNG_state, stateA);
setSlaterMatrices(stateA);
setWaveFunction(stateA, particle, dimension, step);
//setQuantumForce(stateA);
for (int cycle = 0; cycle < CYCLES; cycle++) {
if (parityReversal) {
if (MC_Trial(stateB, stateA)) parityReversal = !parityReversal;
} else {
if (MC_Trial(stateA, stateB)) parityReversal = !parityReversal;
}
if (parityReversal) {
recordEnergy(stateB);
} else {
recordEnergy(stateA);
}
}
}
*/
void MC_Sample_Variations (int dimensions) {
// dim3 bpg(16*8);
// dim3 tpb(32);
// Test_Kernel <<< bpg, tpb >>> ();
dim3 threadsPerBlock(8,1);
cudaDeviceSynchronize();
cudaCheckErrors("trying to sync before kernel launch");
printf("sizeOfSM: %d\n", sizeOfSM);
VMC_Kernel <<< 1, threadsPerBlock, sizeOfSM*sizeof(double)>>> (ALPHA, BETA, sqrt(ALPHA*OMEGA));
// VMC_Kernel2 <<<1, threadsPerBlock, sizeOfSM2*sizeOf(double)>>> (ALPHA, BETA);
cudaCheckErrors("launching kernel");
cudaDeviceSynchronize();
cudaCheckErrors("trying to sync after kernel");
return;
}
|
3077a3c9cf9af516b75eeac831131ed9bf307836.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Consider 2 filter
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
int main()
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("Setting up the problem..."); fflush(stdout);
startTime(&timer);
//M1: horizontal, M2: vertical
Matrix M1_h,M2_h,N_h,P1_h,P2_h,P3_h; // M: filter, N: input image, P: output image
Matrix N_d,P1_d,P2_d,P3_d;
unsigned imageHeight = 1080; //for test_image2
unsigned imageWidth = 1920; //for test_image2
dim3 dim_grid, dim_block;
/* Allocate host memory */
M1_h = allocateMatrix(FILTER_SIZE,FILTER_SIZE);
M2_h = allocateMatrix(FILTER_SIZE,FILTER_SIZE);
N_h = allocateMatrix(imageHeight,imageWidth);
P1_h = allocateMatrix(imageHeight,imageWidth);
P2_h = allocateMatrix(imageHeight,imageWidth);
P3_h = allocateMatrix(imageHeight,imageWidth);
/* Initialize filter and images */
loadData(M1_h,"Ysobel5_5.txt"); //horizontal filter
loadData(M2_h,"Xsobel5_5.txt"); //vertical filter
loadData(N_h,"test_image2.txt"); //image
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf("Image: %u x %u\n",imageHeight,imageWidth);
printf("Mask: %u x %u\n",FILTER_SIZE,FILTER_SIZE);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
N_d = allocateDeviceMatrix(imageHeight,imageWidth);
P1_d = allocateDeviceMatrix(imageHeight,imageWidth);
P2_d = allocateDeviceMatrix(imageHeight,imageWidth);
P3_d = allocateDeviceMatrix(imageHeight,imageWidth);
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n",elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
/* Copy image to device global memory */
copyToDeviceMatrix(N_d,N_h);
/* Copy mask to device constant memory */
hipMemcpyToSymbol(M1_c, M1_h.elements, M1_h.height*M1_h.width*sizeof(float));
hipMemcpyToSymbol(M2_c, M2_h.elements, M2_h.height*M2_h.width*sizeof(float));
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n",elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
dim_block.x = BLOCK_SIZE; dim_block.y = BLOCK_SIZE; dim_block.z = 1;
dim_grid.x = imageWidth/TILE_SIZE;
if(imageWidth%TILE_SIZE != 0)
dim_grid.x++;
dim_grid.y = imageHeight/TILE_SIZE;
if(imageHeight%TILE_SIZE != 0)
dim_grid.y++;
dim_grid.z = 1;
hipLaunchKernelGGL(( convolution), dim3(dim_grid),dim3(dim_block), 0, 0, N_d,P1_d,P2_d,P3_d);
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
copyFromDeviceMatrix(P1_h,P1_d);
copyFromDeviceMatrix(P2_h,P2_d);
copyFromDeviceMatrix(P3_h,P3_d);
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Executing the convolution in host..."); fflush(stdout);
startTime(&timer);
printf("Verifying results..."); fflush(stdout);
verify(M1_h,N_h,P1_h);
verify(M2_h,N_h,P2_h);
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
/* Saving Results */
saveResult(P1_h,"testImage2_Results_horizontal.txt");
saveResult(P2_h,"testImage2_Results_vertical.txt");
saveResult(P3_h,"testImage2_Results_resultant.txt");
// Free memory ------------------------------------------------------------
freeMatrix(M1_h); freeMatrix(M2_h); freeMatrix(N_h);
freeMatrix(P1_h); freeMatrix(P2_h); freeMatrix(P3_h);
freeDeviceMatrix(N_d);
freeDeviceMatrix(P1_d); freeDeviceMatrix(P2_d); freeDeviceMatrix(P3_d);
return 0;
}
| 3077a3c9cf9af516b75eeac831131ed9bf307836.cu | //Consider 2 filter
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
int main()
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("Setting up the problem..."); fflush(stdout);
startTime(&timer);
//M1: horizontal, M2: vertical
Matrix M1_h,M2_h,N_h,P1_h,P2_h,P3_h; // M: filter, N: input image, P: output image
Matrix N_d,P1_d,P2_d,P3_d;
unsigned imageHeight = 1080; //for test_image2
unsigned imageWidth = 1920; //for test_image2
dim3 dim_grid, dim_block;
/* Allocate host memory */
M1_h = allocateMatrix(FILTER_SIZE,FILTER_SIZE);
M2_h = allocateMatrix(FILTER_SIZE,FILTER_SIZE);
N_h = allocateMatrix(imageHeight,imageWidth);
P1_h = allocateMatrix(imageHeight,imageWidth);
P2_h = allocateMatrix(imageHeight,imageWidth);
P3_h = allocateMatrix(imageHeight,imageWidth);
/* Initialize filter and images */
loadData(M1_h,"Ysobel5_5.txt"); //horizontal filter
loadData(M2_h,"Xsobel5_5.txt"); //vertical filter
loadData(N_h,"test_image2.txt"); //image
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf("Image: %u x %u\n",imageHeight,imageWidth);
printf("Mask: %u x %u\n",FILTER_SIZE,FILTER_SIZE);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
N_d = allocateDeviceMatrix(imageHeight,imageWidth);
P1_d = allocateDeviceMatrix(imageHeight,imageWidth);
P2_d = allocateDeviceMatrix(imageHeight,imageWidth);
P3_d = allocateDeviceMatrix(imageHeight,imageWidth);
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n",elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
/* Copy image to device global memory */
copyToDeviceMatrix(N_d,N_h);
/* Copy mask to device constant memory */
cudaMemcpyToSymbol(M1_c, M1_h.elements, M1_h.height*M1_h.width*sizeof(float));
cudaMemcpyToSymbol(M2_c, M2_h.elements, M2_h.height*M2_h.width*sizeof(float));
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n",elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
dim_block.x = BLOCK_SIZE; dim_block.y = BLOCK_SIZE; dim_block.z = 1;
dim_grid.x = imageWidth/TILE_SIZE;
if(imageWidth%TILE_SIZE != 0)
dim_grid.x++;
dim_grid.y = imageHeight/TILE_SIZE;
if(imageHeight%TILE_SIZE != 0)
dim_grid.y++;
dim_grid.z = 1;
convolution<<<dim_grid,dim_block>>>(N_d,P1_d,P2_d,P3_d);
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
copyFromDeviceMatrix(P1_h,P1_d);
copyFromDeviceMatrix(P2_h,P2_d);
copyFromDeviceMatrix(P3_h,P3_d);
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Executing the convolution in host..."); fflush(stdout);
startTime(&timer);
printf("Verifying results..."); fflush(stdout);
verify(M1_h,N_h,P1_h);
verify(M2_h,N_h,P2_h);
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
/* Saving Results */
saveResult(P1_h,"testImage2_Results_horizontal.txt");
saveResult(P2_h,"testImage2_Results_vertical.txt");
saveResult(P3_h,"testImage2_Results_resultant.txt");
// Free memory ------------------------------------------------------------
freeMatrix(M1_h); freeMatrix(M2_h); freeMatrix(N_h);
freeMatrix(P1_h); freeMatrix(P2_h); freeMatrix(P3_h);
freeDeviceMatrix(N_d);
freeDeviceMatrix(P1_d); freeDeviceMatrix(P2_d); freeDeviceMatrix(P3_d);
return 0;
}
|
ce636c9513ccb65f000ebdeeef05dc1bdaa0bcfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
#define TYPE float
typedef TYPE T;
__constant__ float dev_box[4];
__constant__ int dev_threads[1];
__constant__ int dev_blocks[1];
__constant__ int dev_n_of_ints[1];
__constant__ int dev_n_of_func = 4;
template<class T>
class interval_gpu
{
public:
__device__ __host__ interval_gpu();
__device__ __host__ interval_gpu(T const &v);
__device__ __host__ interval_gpu(T const &l, T const &u);
__device__ __host__ T const &lower() const;
__device__ __host__ T const &upper() const;
static __device__ __host__ interval_gpu empty();
friend ostream& operator<<(ostream& os, const interval_gpu<T> &x){
os<<"["<<x.lower()<<":"<<x.upper()<<"]";return os;
}
private: T low; T up;
};
// Constructors
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(){}
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &v) :
low(v), up(v){}
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &l, T const &u) :
low(l), up(u){}
template<class T> inline __device__ __host__
T const &interval_gpu<T>::lower() const
{return low;}
template<class T> inline __device__ __host__
T const &interval_gpu<T>::upper() const
{return up;}
//OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD
template<class T> inline __host__ __device__
interval_gpu<T> operator+(interval_gpu<T> const &x, interval_gpu<T> const &y)
{
return interval_gpu<T>(x.lower() + y.lower(), x.upper() + y.upper());
}
template<class T> inline __host__ __device__
interval_gpu<T> operator-(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(x.lower() - y.upper(), x.upper() - y.lower());}
template<class T> inline __host__ __device__
interval_gpu<T> operator*(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()*y.lower(),x.lower()*y.upper()),
min(x.upper()*y.lower(),x.upper()*y.upper())),
max(max(x.lower()*y.lower(),x.lower()*y.upper()),
max(x.upper()*y.lower(),x.upper()*y.upper())));}
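// the product interval is bounded by the min and max of the four endpoint products; division below is analogous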
template<class T> inline __host__ __device__
interval_gpu<T> operator/(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()/y.lower(),x.lower()/y.upper()),
min(x.upper()/y.lower(),x.upper()/y.upper())),
max(max(x.lower()/y.lower(),x.lower()/y.upper()),
max(x.upper()/y.lower(),x.upper()/y.upper())));}
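// each constraint g_i evaluates f over the interval box and returns 2 if f<0 certainly holds, 1 if the box straddles f=0, and 0 if f<0 certainly fails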
__device__ __forceinline__ int g1(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> f(x[0]*x[0] + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g2(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> f(l*l - x[0]*x[0] - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g3(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> l0(5);
interval_gpu<T> f((x[0]-l0)*(x[0]-l0) + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g4(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> l0(5);
interval_gpu<T> f(l*l - (x[0]-l0)*(x[0]-l0) - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__constant__ int(*dev_func_pp[4])(interval_gpu<T>*) = {&g1,&g2,&g3,&g4};
template<class T>
__global__ void second_grid(int* detail_res,int* corner){
double x1_low = dev_box[0] + int(corner[0] % dev_threads[0])*(dev_box[1] - dev_box[0])/dev_threads[0];
double x2_low = dev_box[2] + int(corner[0] / dev_threads[0])*(dev_box[3] - dev_box[2])/dev_blocks[0];
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(x1_low + (threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x,
x1_low +(1+threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x);
x[1] = interval_gpu<T>(x2_low + (blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x,
x2_low + (1+blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x);
detail_res[(blockIdx.x*blockDim.x + threadIdx.x)] = 1;
for(int i = 0; i < dev_n_of_func; i++){
detail_res[(blockIdx.x*blockDim.x + threadIdx.x)] *= (*dev_func_pp[i])(x);
}
if((blockIdx.x*blockDim.x + threadIdx.x)==0){
printf("corner = %d\n",corner[0]);
}
}
//1 thread to up, in for loop to the end
template<class T>
__global__ void large_grid(int* res){
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x);
x[1] = interval_gpu<T>(dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x,
dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x);
res[(blockIdx.x*blockDim.x + threadIdx.x)] = 1;
for(int i = 0; i < dev_n_of_func; i++){
res[(blockIdx.x*blockDim.x + threadIdx.x)] *= (*dev_func_pp[i])(x);
}
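// res is the product of the four constraint codes: 16 means the box is certainly feasible, 0 certainly infeasible, anything in between marks a boundary box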
// if( (blockIdx.x*blockDim.x + threadIdx.x) == 2926){printf("[%f:%f]:[%f:%f]\n",
// dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
// dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
// dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x,
// dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x);}
// if(res[(blockIdx.x*blockDim.x + threadIdx.x)]%16>0){
// //call
// }
}
// only the boundary cells need to be stored.
// returns 2048 indices identifying the coarse-grid cells
// launch kernel from kernel: cudaLaunchKernel
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
int main(){
int n_of_ints = 2;
float host_box[4] = {-15.0,0.0,0.0,7.5};
int lb = 64;
int lt = lb*2;
int * res;
int * detail_res;
int*corner;
//cout<<fixed;
//cout.precision(4);
hipMallocManaged(&corner, sizeof(int));
hipMallocManaged(&res, sizeof(int)*lb*lt);
hipMallocManaged(&detail_res, sizeof(int)*lb*lb);
hipMemcpyToSymbol(dev_n_of_ints, &n_of_ints, sizeof(int));
hipMemcpyToSymbol(dev_threads, <, sizeof(int));
hipMemcpyToSymbol(dev_blocks, &lb, sizeof(int));
hipMemcpyToSymbol(dev_box, &host_box, sizeof(float)*4);
hipLaunchKernelGGL(( large_grid<T>), dim3(lb), dim3(lt), 0, 0, res);
hipDeviceSynchronize();
int counter = 0;
for(int i = 0; i < lb; i++){
for(int j = 0; j < lt; j++){
if(int(res[(i*lt+j)])%16>0){
interval_gpu<T> xb1(host_box[0] + (j) * (host_box[1] - host_box[0])/lt ,host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt);
interval_gpu<T> xb2(host_box[2] + (i) * (host_box[3] - host_box[2])/lb ,host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb);
// cout<<xb1<<":"<<xb2<<"\n";
}
if(int(res[(i*lt+j)])%16>0){
counter++;
corner[0] = (i*lt+j);//
// corner[0] = 2926;
//cout<<corner[0]<<"\n";
// break;
// //cout<<"x1_low = "<<((i*lt+j)% lb)*(host_box[1] - host_box[0])/lt<<"\n";
// //cout<<"x2_low = "<<((i*lt+j)/ lb)*(host_box[3] - host_box[2])/lb<<"\n";
cout<<"counter = "<<counter<<"\n";
hipLaunchKernelGGL(( second_grid<T>), dim3(lb),dim3(lb), 0, 0, detail_res,corner);
CudaCheckError();
hipDeviceSynchronize();
for(int k = 0; k < lb; k++){
for(int m = 0; m < lb; m++){
if(int(detail_res[k*lb+m])%16>0){
double x1_low = host_box[0] + (j) * (host_box[1] - host_box[0])/lt ; //host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt
double x2_low = host_box[2] + (i) * (host_box[3] - host_box[2])/lb ; //host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb
interval_gpu<T> x3(x1_low + m*(host_box[1] - host_box[0])/lt/lb,x1_low + (m+1)*(host_box[1] - host_box[0])/lt/lb);
interval_gpu<T> x4(x2_low + k*(host_box[3] - host_box[2])/lb/lb,x2_low + (k+1)*(host_box[3] - host_box[2])/lb/lb);
// cout<<x3<<":"<<x4<<"\n";
}
detail_res[k*lb+m] = 0;
}
}
hipDeviceSynchronize();
// if(counter == 21){i = lb; j = lt; break;}
}
}
}
// cout<<"dick"<<"\n";
// hipFree(res);
// for(int i = 0; i < lb; i++){
// for(int j = 0; j < lt; j++){
// if(int(res[(i*lt+j)])%16>0){
// interval_gpu<T> xb1(host_box[0] + (j) * (host_box[1] - host_box[0])/lt ,host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt);
// interval_gpu<T> xb2(host_box[2] + (i) * (host_box[3] - host_box[2])/lb ,host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb);
// cout<<xb1<<":"<<xb2<<"\n";
// }
// }
// }
hipFree(res);
hipFree(detail_res);
hipFree(corner);
// dev_blocks, dev_threads, dev_n_of_ints and dev_box are __constant__ symbols, not heap allocations, so they must not be passed to hipFree
return 0;
}
| ce636c9513ccb65f000ebdeeef05dc1bdaa0bcfa.cu | #include <iostream>
using namespace std;
#define TYPE float
typedef TYPE T;
__constant__ float dev_box[4];
__constant__ int dev_threads[1];
__constant__ int dev_blocks[1];
__constant__ int dev_n_of_ints[1];
__constant__ int dev_n_of_func = 4;
template<class T>
class interval_gpu
{
public:
__device__ __host__ interval_gpu();
__device__ __host__ interval_gpu(T const &v);
__device__ __host__ interval_gpu(T const &l, T const &u);
__device__ __host__ T const &lower() const;
__device__ __host__ T const &upper() const;
static __device__ __host__ interval_gpu empty();
friend ostream& operator<<(ostream& os, const interval_gpu<T> &x){
os<<"["<<x.lower()<<":"<<x.upper()<<"]";return os;
}
private: T low; T up;
};
// Constructors
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(){}
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &v) :
low(v), up(v){}
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &l, T const &u) :
low(l), up(u){}
template<class T> inline __device__ __host__
T const &interval_gpu<T>::lower() const
{return low;}
template<class T> inline __device__ __host__
T const &interval_gpu<T>::upper() const
{return up;}
//OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD
template<class T> inline __host__ __device__
interval_gpu<T> operator+(interval_gpu<T> const &x, interval_gpu<T> const &y)
{
return interval_gpu<T>(x.lower() + y.lower(), x.upper() + y.upper());
}
template<class T> inline __host__ __device__
interval_gpu<T> operator-(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(x.lower() - y.upper(), x.upper() - y.lower());}
template<class T> inline __host__ __device__
interval_gpu<T> operator*(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()*y.lower(),x.lower()*y.upper()),
min(x.upper()*y.lower(),x.upper()*y.upper())),
max(max(x.lower()*y.lower(),x.lower()*y.upper()),
max(x.upper()*y.lower(),x.upper()*y.upper())));}
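// the product interval is bounded by the min and max of the four endpoint products; division below is analogous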
template<class T> inline __host__ __device__
interval_gpu<T> operator/(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()/y.lower(),x.lower()/y.upper()),
min(x.upper()/y.lower(),x.upper()/y.upper())),
max(max(x.lower()/y.lower(),x.lower()/y.upper()),
max(x.upper()/y.lower(),x.upper()/y.upper())));}
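// each constraint g_i evaluates f over the interval box and returns 2 if f<0 certainly holds, 1 if the box straddles f=0, and 0 if f<0 certainly fails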
__device__ __forceinline__ int g1(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> f(x[0]*x[0] + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g2(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> f(l*l - x[0]*x[0] - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g3(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> l0(5);
interval_gpu<T> f((x[0]-l0)*(x[0]-l0) + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ __forceinline__ int g4(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> l0(5);
interval_gpu<T> f(l*l - (x[0]-l0)*(x[0]-l0) - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__constant__ int(*dev_func_pp[4])(interval_gpu<T>*) = {&g1,&g2,&g3,&g4};
template<class T>
__global__ void second_grid(int* detail_res,int* corner){
double x1_low = dev_box[0] + int(corner[0] % dev_threads[0])*(dev_box[1] - dev_box[0])/dev_threads[0];
double x2_low = dev_box[2] + int(corner[0] / dev_threads[0])*(dev_box[3] - dev_box[2])/dev_blocks[0];
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(x1_low + (threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x,
x1_low +(1+threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x);
x[1] = interval_gpu<T>(x2_low + (blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x,
x2_low + (1+blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x);
detail_res[(blockIdx.x*blockDim.x + threadIdx.x)] = 1;
for(int i = 0; i < dev_n_of_func; i++){
detail_res[(blockIdx.x*blockDim.x + threadIdx.x)] *= (*dev_func_pp[i])(x);
}
if((blockIdx.x*blockDim.x + threadIdx.x)==0){
printf("corner = %d\n",corner[0]);
}
}
//1 thread to up, in for loop to the end
template<class T>
__global__ void large_grid(int* res){
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x);
x[1] = interval_gpu<T>(dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x,
dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x);
res[(blockIdx.x*blockDim.x + threadIdx.x)] = 1;
for(int i = 0; i < dev_n_of_func; i++){
res[(blockIdx.x*blockDim.x + threadIdx.x)] *= (*dev_func_pp[i])(x);
}
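// res is the product of the four constraint codes: 16 means the box is certainly feasible, 0 certainly infeasible, anything in between marks a boundary box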
// if( (blockIdx.x*blockDim.x + threadIdx.x) == 2926){printf("[%f:%f]:[%f:%f]\n",
// dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
// dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
// dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x,
// dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x);}
// if(res[(blockIdx.x*blockDim.x + threadIdx.x)]%16>0){
// //call
// }
}
//в ÑÑПÑМеМОО МÑжЎаÑÑÑÑ ÑПлÑкП гÑаМОÑМÑе ÑÑейкО.
//вПзвÑаÑаеÑÑÑ 2048 ОМЎекÑПв МПЌеÑПв кÑÑпМПй ÑеÑкО
//launch kernell fromkernell cudalaunchkernel
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
int main(){
int n_of_ints = 2;
float host_box[4] = {-15.0,0.0,0.0,7.5};
int lb = 64;
int lt = lb*2;
int * res;
int * detail_res;
int*corner;
//cout<<fixed;
//cout.precision(4);
cudaMallocManaged(&corner, sizeof(int));
cudaMallocManaged(&res, sizeof(int)*lb*lt);
cudaMallocManaged(&detail_res, sizeof(int)*lb*lb);
cudaMemcpyToSymbol(dev_n_of_ints, &n_of_ints, sizeof(int));
cudaMemcpyToSymbol(dev_threads, <, sizeof(int));
cudaMemcpyToSymbol(dev_blocks, &lb, sizeof(int));
cudaMemcpyToSymbol(dev_box, &host_box, sizeof(float)*4);
large_grid<T><<<lb, lt>>>(res);
cudaDeviceSynchronize();
int counter = 0;
for(int i = 0; i < lb; i++){
for(int j = 0; j < lt; j++){
if(int(res[(i*lt+j)])%16>0){
interval_gpu<T> xb1(host_box[0] + (j) * (host_box[1] - host_box[0])/lt ,host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt);
interval_gpu<T> xb2(host_box[2] + (i) * (host_box[3] - host_box[2])/lb ,host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb);
// cout<<xb1<<":"<<xb2<<"\n";
}
if(int(res[(i*lt+j)])%16>0){
counter++;
corner[0] = (i*lt+j);//
// corner[0] = 2926;
//cout<<corner[0]<<"\n";
// break;
// //cout<<"x1_low = "<<((i*lt+j)% lb)*(host_box[1] - host_box[0])/lt<<"\n";
// //cout<<"x2_low = "<<((i*lt+j)/ lb)*(host_box[3] - host_box[2])/lb<<"\n";
cout<<"counter = "<<counter<<"\n";
second_grid<T><<<lb,lb>>>(detail_res,corner);
CudaCheckError();
cudaDeviceSynchronize();
for(int k = 0; k < lb; k++){
for(int m = 0; m < lb; m++){
if(int(detail_res[k*lb+m])%16>0){
double x1_low = host_box[0] + (j) * (host_box[1] - host_box[0])/lt ; //host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt
double x2_low = host_box[2] + (i) * (host_box[3] - host_box[2])/lb ; //host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb
interval_gpu<T> x3(x1_low + m*(host_box[1] - host_box[0])/lt/lb,x1_low + (m+1)*(host_box[1] - host_box[0])/lt/lb);
interval_gpu<T> x4(x2_low + k*(host_box[3] - host_box[2])/lb/lb,x2_low + (k+1)*(host_box[3] - host_box[2])/lb/lb);
// cout<<x3<<":"<<x4<<"\n";
}
detail_res[k*lb+m] = 0;
}
}
cudaDeviceSynchronize();
// if(counter == 21){i = lb; j = lt; break;}
}
}
}
// cout<<"dick"<<"\n";
// cudaFree(res);
// for(int i = 0; i < lb; i++){
// for(int j = 0; j < lt; j++){
// if(int(res[(i*lt+j)])%16>0){
// interval_gpu<T> xb1(host_box[0] + (j) * (host_box[1] - host_box[0])/lt ,host_box[0]+(1+j) * (host_box[1] - host_box[0])/lt);
// interval_gpu<T> xb2(host_box[2] + (i) * (host_box[3] - host_box[2])/lb ,host_box[2]+(1+i) * (host_box[3] - host_box[2])/lb);
// cout<<xb1<<":"<<xb2<<"\n";
// }
// }
// }
cudaFree(res);
cudaFree(detail_res);
cudaFree(corner);
// dev_blocks, dev_threads, dev_n_of_ints and dev_box are __constant__ symbols, not heap allocations, so they must not be passed to cudaFree
return 0;
}
|
f06158210c0bad73be8d62038f8ab2ea2143b770.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void spmm_kernel(const int *indptr, const int *indices,
const float *data, const float *B, float *C, int k,
int n, int m, int start_pos, int end_pos) {
// matC (n , m) matB (k , m) C = A * B
// data & indices (nnz) ,indptr(n)
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t ind = id / m;
size_t offset = id - ind * m;
if (ind >= n)
return;
C[m * ind + offset] = 0;
int i_s = indptr[ind], i_e = indptr[ind + 1];
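// start_pos/end_pos select a column slice of A for partitioned SpMM; B is then indexed relative to start_pos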
if (start_pos == -1) {
for (int i = i_s; i < i_e; i++) {
int from = indices[i];
float scale = data[i];
C[m * ind + offset] += B[m * from + offset] * scale;
}
} else {
for (int i = i_s; i < i_e; i++) {
if (indices[i] >= start_pos && indices[i] < end_pos) {
int from = indices[i] - start_pos;
float scale = data[i];
C[m * ind + offset] += B[m * from + offset] * scale;
}
}
}
return;
}
__global__ void spmm_set_zero_kernel(float *output, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size)
return;
output[ind] = 0;
}
__global__ void spmm_T_kernel(const int *indptr, const int *indices,
const float *data, const float *B, float *C,
int k, int n, int m) {
// matC (n , m) matB (k , m) C = A^T *B
// data & indices (nnz) ,indptr(k)
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t ind = id / m;
size_t offset = id - ind * m;
if (ind >= k)
return;
// C[m * ind + offset] = 0;
int i_s = indptr[ind], i_e = indptr[ind + 1];
float val = B[m * ind + offset];
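// the transposed product scatters row ind of B into multiple rows of C, so the accumulation must use atomicAdd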
for (int i = i_s; i < i_e; i++) {
int to = indices[i];
float addend = data[i] * val;
atomicAdd(&C[m * to + offset], addend);
}
return;
}
int CuSparse_DLGpuCsrmm0(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
const DLArrayHandle matB, DLArrayHandle matC,
int start_pos, int end_pos,
DLStreamHandle stream_handle = NULL) {
int n = matC->shape[0];
int m = matC->shape[1];
int k = matB->shape[0];
dim3 blocks;
dim3 threads;
if (n * m <= 1024) {
threads.x = n * m;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (n * m + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( spmm_kernel), dim3(blocks), dim3(threads), 0,
*(hipStream_t *)stream_handle->handle,
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m, start_pos, end_pos);
} else {
hipLaunchKernelGGL(( spmm_kernel), dim3(blocks), dim3(threads), 0, 0,
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m, start_pos, end_pos);
}
return 0;
}
int CuSparse_DLGpuCsrmm1(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
const DLArrayHandle matB, DLArrayHandle matC,
DLStreamHandle stream_handle = NULL) {
int n = matC->shape[0];
int m = matC->shape[1];
int k = matB->shape[0];
dim3 blocks;
dim3 threads;
if (k * m <= 1024) {
threads.x = k * m;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (k * m + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( spmm_set_zero_kernel), dim3(n), dim3(m), 0,
*(hipStream_t *)stream_handle->handle,
(float *)matC->data, n * m);
hipLaunchKernelGGL(( spmm_T_kernel), dim3(blocks), dim3(threads), 0,
*(hipStream_t *)stream_handle->handle,
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m);
} else {
hipLaunchKernelGGL(( spmm_set_zero_kernel), dim3(n), dim3(m), 0, 0, (float *)matC->data, n * m);
hipLaunchKernelGGL(( spmm_T_kernel), dim3(blocks), dim3(threads), 0, 0,
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m);
}
return 0;
}
int CuSparse_DLGpuCsrmm(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
bool transposeA, const DLArrayHandle matB,
bool transposeB, DLArrayHandle matC, int start_pos = -1,
int end_pos = -1, DLStreamHandle stream_handle = NULL) {
assert(!transposeB);
assert(data_handle->ndim == 1);
assert(row_handle->ndim == 1);
assert(col_handle->ndim == 1);
assert(matB->ndim == 2);
assert(matC->ndim == 2);
if (!transposeA) {
return CuSparse_DLGpuCsrmm0(data_handle, row_handle, col_handle, nrow,
ncol, matB, matC, start_pos, end_pos,
stream_handle);
} else {
return CuSparse_DLGpuCsrmm1(data_handle, row_handle, col_handle, nrow,
ncol, matB, matC, stream_handle);
}
} | f06158210c0bad73be8d62038f8ab2ea2143b770.cu | #include "gpu_runtime.h"
__global__ void spmm_kernel(const int *indptr, const int *indices,
const float *data, const float *B, float *C, int k,
int n, int m, int start_pos, int end_pos) {
// matC (n , m) matB (k , m) C = A * B
// data & indices (nnz) ,indptr(n)
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t ind = id / m;
size_t offset = id - ind * m;
if (ind >= n)
return;
C[m * ind + offset] = 0;
int i_s = indptr[ind], i_e = indptr[ind + 1];
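// start_pos/end_pos select a column slice of A for partitioned SpMM; B is then indexed relative to start_pos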
if (start_pos == -1) {
for (int i = i_s; i < i_e; i++) {
int from = indices[i];
float scale = data[i];
C[m * ind + offset] += B[m * from + offset] * scale;
}
} else {
for (int i = i_s; i < i_e; i++) {
if (indices[i] >= start_pos && indices[i] < end_pos) {
int from = indices[i] - start_pos;
float scale = data[i];
C[m * ind + offset] += B[m * from + offset] * scale;
}
}
}
return;
}
__global__ void spmm_set_zero_kernel(float *output, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size)
return;
output[ind] = 0;
}
__global__ void spmm_T_kernel(const int *indptr, const int *indices,
const float *data, const float *B, float *C,
int k, int n, int m) {
// matC (n , m) matB (k , m) C = A^T *B
// data & indices (nnz) ,indptr(k)
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t ind = id / m;
size_t offset = id - ind * m;
if (ind >= k)
return;
// C[m * ind + offset] = 0;
int i_s = indptr[ind], i_e = indptr[ind + 1];
float val = B[m * ind + offset];
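// the transposed product scatters row ind of B into multiple rows of C, so the accumulation must use atomicAdd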
for (int i = i_s; i < i_e; i++) {
int to = indices[i];
float addend = data[i] * val;
atomicAdd(&C[m * to + offset], addend);
}
return;
}
int CuSparse_DLGpuCsrmm0(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
const DLArrayHandle matB, DLArrayHandle matC,
int start_pos, int end_pos,
DLStreamHandle stream_handle = NULL) {
int n = matC->shape[0];
int m = matC->shape[1];
int k = matB->shape[0];
dim3 blocks;
dim3 threads;
if (n * m <= 1024) {
threads.x = n * m;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (n * m + 1023) / 1024;
}
if (stream_handle) {
spmm_kernel<<<blocks, threads, 0,
*(cudaStream_t *)stream_handle->handle>>>(
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m, start_pos, end_pos);
} else {
spmm_kernel<<<blocks, threads>>>(
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m, start_pos, end_pos);
}
return 0;
}
int CuSparse_DLGpuCsrmm1(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
const DLArrayHandle matB, DLArrayHandle matC,
DLStreamHandle stream_handle = NULL) {
int n = matC->shape[0];
int m = matC->shape[1];
int k = matB->shape[0];
dim3 blocks;
dim3 threads;
if (k * m <= 1024) {
threads.x = k * m;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (k * m + 1023) / 1024;
}
if (stream_handle) {
spmm_set_zero_kernel<<<n, m, 0,
*(cudaStream_t *)stream_handle->handle>>>(
(float *)matC->data, n * m);
spmm_T_kernel<<<blocks, threads, 0,
*(cudaStream_t *)stream_handle->handle>>>(
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m);
} else {
spmm_set_zero_kernel<<<n, m>>>((float *)matC->data, n * m);
spmm_T_kernel<<<blocks, threads>>>(
(const int *)row_handle->data, (const int *)col_handle->data,
(const float *)data_handle->data, (const float *)matB->data,
(float *)matC->data, k, n, m);
}
return 0;
}
int CuSparse_DLGpuCsrmm(const DLArrayHandle data_handle,
const DLArrayHandle row_handle,
const DLArrayHandle col_handle, int nrow, int ncol,
bool transposeA, const DLArrayHandle matB,
bool transposeB, DLArrayHandle matC, int start_pos = -1,
int end_pos = -1, DLStreamHandle stream_handle = NULL) {
assert(!transposeB);
assert(data_handle->ndim == 1);
assert(row_handle->ndim == 1);
assert(col_handle->ndim == 1);
assert(matB->ndim == 2);
assert(matC->ndim == 2);
if (!transposeA) {
return CuSparse_DLGpuCsrmm0(data_handle, row_handle, col_handle, nrow,
ncol, matB, matC, start_pos, end_pos,
stream_handle);
} else {
return CuSparse_DLGpuCsrmm1(data_handle, row_handle, col_handle, nrow,
ncol, matB, matC, stream_handle);
}
} |
48a0e417b5f7c05ad4dfd608828318f915e128fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "ImageHandler.h"
#include "ImageModel.h"
#include "helper.h"
#define BLOCK_SIZE 32 // ideal block size for performance: 32 x 32 = 1024 => block core count
enum Filter
{
BoxBlur = 0,
Sharpen = 1
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
typedef unsigned char byte_t;
__global__ void convolution(float* pixelMap, float* filter, float* resultMap, int width, int height, int components, const int FILTER_SIZE) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
const int filterRadius = FILTER_SIZE / 2;
if (i >= width || j >= height)
return;
for (int z = 0; z < components; z++) {// iterate thru colors
float sum = 0.0;
for (int x = -filterRadius; x <= filterRadius; x++) // iterate thru filter rows
for (int y = -filterRadius; y <= filterRadius; y++) // iterate thru filter cols
sum += (i + x >= width || i + x < 0 || y + j >= height || y + j < 0)
? 0 // edge ignore solution
: filter[(x + filterRadius) * FILTER_SIZE + (y + filterRadius)] // filter x pixel[color]; offset by the radius so any odd FILTER_SIZE works
* pixelMap[((i + x) * width + (j + y)) * components + z];
resultMap[(i * width + j) * components + z] = sum;
}
}
int main(char** argv, int argc) {
float* d_pixelMap, * d_resultMap, * h_resultMap, *** filters;
char** filter_names;
int* filter_sizes, filter_count;
int size;
//-----------------
readFilters("filters.txt",&filters,&filter_sizes,&filter_names, &filter_count);
int pick = showMenu(filter_names, filter_count);
const int FILTER_SIZE = filter_sizes[pick];
auto inputImage = importPPM("lena.ppm");
auto outputImage = Image_new(inputImage->width, inputImage->height, inputImage->channels);
size = inputImage->width * inputImage->height * inputImage->channels;
float* flatFilter = flatenFilter(filters[pick], FILTER_SIZE);
float* d_filter;
/*
Declare and allocate host and device memory. <
Initialize host data. <
Transfer data from the host to the device. <
Execute one or more kernels. <
Transfer results from the device to the host. <
*/
// malloc
gpuErrchk(hipMalloc((void**)&d_filter, sizeof(float) * FILTER_SIZE * FILTER_SIZE));
gpuErrchk(hipMalloc((void**)&d_pixelMap, sizeof(float) * size));
gpuErrchk(hipMalloc((void**)&d_resultMap, sizeof(float) * size));
//---cpy
gpuErrchk(hipMemcpy(d_pixelMap, inputImage->data, size * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_filter, flatFilter, sizeof(float) * FILTER_SIZE * FILTER_SIZE, hipMemcpyHostToDevice));
//DO STUFF
dim3 numberOfBlocks(ceil(inputImage->width / (float)BLOCK_SIZE), ceil(inputImage->height / (float)BLOCK_SIZE)); // this divides the image into 32x32 (+/-) blocks, rounding up so edge pixels are covered
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // set to 32 x 32 = 1024. This is the maximum thread count per block (best performance)
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( convolution) , dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, d_pixelMap, d_filter, d_resultMap, inputImage->width, inputImage->height, 3, FILTER_SIZE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
gpuErrchk(hipPeekAtLastError());
printf("Success! Took %I64d mqs\n", duration);
h_resultMap = (float*)malloc(sizeof(float) * inputImage->width * inputImage->height * inputImage->channels);
gpuErrchk(hipMemcpy(h_resultMap, d_resultMap, size * sizeof(float), hipMemcpyDeviceToHost));
outputImage->data = h_resultMap;
exportPPM("output.ppm", outputImage);
if (shouldRunSequential()) {
auto seq_outputImage = Image_new(inputImage->width, inputImage->height, inputImage->channels);
auto seq_start = std::chrono::high_resolution_clock::now();
seq_outputImage->data = sequencialConvolution(inputImage->data, flatFilter, inputImage->width, inputImage->height, inputImage->channels, filter_sizes[pick]);
auto seq_end = std::chrono::high_resolution_clock::now();
auto seq_duration = std::chrono::duration_cast<std::chrono::microseconds>(seq_end - seq_start).count();
printf("Success! CPU convolution took %I64d mqs\n", seq_duration);
printf("Speed up of %d times!\n", seq_duration / duration);
exportPPM("seq.ppm", seq_outputImage);
Image_delete(seq_outputImage);
flushStdinSafe();
getchar();
}
char* ext = ".bmp";
char output[32];
char base[64] = "magick output.ppm ";
strcpy(output, filter_names[pick]);
strcat(output, ext);
system(strcat(base, output));
system(output);
//clean up
free(flatFilter);
hipFree(d_filter);
hipFree(d_resultMap);
hipFree(d_pixelMap);
Image_delete(inputImage);
Image_delete(outputImage);
return 0;
} | 48a0e417b5f7c05ad4dfd608828318f915e128fa.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "ImageHandler.h"
#include "ImageModel.h"
#include "helper.h"
#define BLOCK_SIZE 32 // ideal block size for performance: 32 x 32 = 1024 => block core count
enum Filter
{
BoxBlur = 0,
Sharpen = 1
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
typedef unsigned char byte_t;
__global__ void convolution(float* pixelMap, float* filter, float* resultMap, int width, int height, int components, const int FILTER_SIZE) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
const int filterRadius = FILTER_SIZE / 2;
if (i >= width || j >= height)
return;
for (int z = 0; z < components; z++) {// iterate thru colors
float sum = 0.0;
for (int x = -filterRadius; x <= filterRadius; x++) // iterate thru filter rows
for (int y = -filterRadius; y <= filterRadius; y++) // iterate thru filter cols
sum += (i + x >= width || i + x < 0 || y + j >= height || y + j < 0)
? 0 // edge ignore solution
: filter[(x + filterRadius) * FILTER_SIZE + (y + filterRadius)] // filter x pixel[color]; offset by the radius so any odd FILTER_SIZE works
* pixelMap[((i + x) * width + (j + y)) * components + z];
resultMap[(i * width + j) * components + z] = sum;
}
}
int main(char** argv, int argc) {
float* d_pixelMap, * d_resultMap, * h_resultMap, *** filters;
char** filter_names;
int* filter_sizes, filter_count;
int size;
//-----------------
readFilters("filters.txt",&filters,&filter_sizes,&filter_names, &filter_count);
int pick = showMenu(filter_names, filter_count);
const int FILTER_SIZE = filter_sizes[pick];
auto inputImage = importPPM("lena.ppm");
auto outputImage = Image_new(inputImage->width, inputImage->height, inputImage->channels);
size = inputImage->width * inputImage->height * inputImage->channels;
float* flatFilter = flatenFilter(filters[pick], FILTER_SIZE);
float* d_filter;
/*
Declare and allocate host and device memory. <
Initialize host data. <
Transfer data from the host to the device. <
Execute one or more kernels. <
Transfer results from the device to the host. <
*/
// malloc
gpuErrchk(cudaMalloc((void**)&d_filter, sizeof(float) * FILTER_SIZE * FILTER_SIZE));
gpuErrchk(cudaMalloc((void**)&d_pixelMap, sizeof(float) * size));
gpuErrchk(cudaMalloc((void**)&d_resultMap, sizeof(float) * size));
//---cpy
gpuErrchk(cudaMemcpy(d_pixelMap, inputImage->data, size * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_filter, flatFilter, sizeof(float) * FILTER_SIZE * FILTER_SIZE, cudaMemcpyHostToDevice));
//DO STUFF
dim3 numberOfBlocks(ceil(inputImage->width / (float)BLOCK_SIZE), ceil(inputImage->height / (float)BLOCK_SIZE)); // this divides the image into 32x32 (+/-) blocks, rounding up so edge pixels are covered
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // set to 32 x 32 = 1024. This is the maximum thread count per block (best performance)
auto start = std::chrono::high_resolution_clock::now();
convolution <<<numberOfBlocks, threadsPerBlock>>> (d_pixelMap, d_filter, d_resultMap, inputImage->width, inputImage->height, 3, FILTER_SIZE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
gpuErrchk(cudaPeekAtLastError());
printf("Success! Took %I64d mqs\n", duration);
h_resultMap = (float*)malloc(sizeof(float) * inputImage->width * inputImage->height * inputImage->channels);
gpuErrchk(cudaMemcpy(h_resultMap, d_resultMap, size * sizeof(float), cudaMemcpyDeviceToHost));
outputImage->data = h_resultMap;
exportPPM("output.ppm", outputImage);
if (shouldRunSequential()) {
auto seq_outputImage = Image_new(inputImage->width, inputImage->height, inputImage->channels);
auto seq_start = std::chrono::high_resolution_clock::now();
seq_outputImage->data = sequencialConvolution(inputImage->data, flatFilter, inputImage->width, inputImage->height, inputImage->channels, filter_sizes[pick]);
auto seq_end = std::chrono::high_resolution_clock::now();
auto seq_duration = std::chrono::duration_cast<std::chrono::microseconds>(seq_end - seq_start).count();
printf("Success! CPU convolution took %I64d mqs\n", seq_duration);
printf("Speed up of %d times!\n", seq_duration / duration);
exportPPM("seq.ppm", seq_outputImage);
Image_delete(seq_outputImage);
flushStdinSafe();
getchar();
}
char* ext = ".bmp";
char output[32];
char base[64] = "magick output.ppm ";
strcpy(output, filter_names[pick]);
strcat(output, ext);
system(strcat(base, output));
system(output);
//clean up
free(flatFilter);
cudaFree(d_filter);
cudaFree(d_resultMap);
cudaFree(d_pixelMap);
Image_delete(inputImage);
Image_delete(outputImage);
return 0;
} |
296d5ea02dc3b64a79d0a65e81edf6c7f2a997bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise2.cu
* @author Alessandro Capotondi
* @date 5 May 2020
* @brief Exercise 3 - CUDA MATMUL Optimized
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
#pragma omp parallel for collapse(2)
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
{
float sum = 0.0;
for (int k = 0; k < n; ++k)
{
sum += a[i * n + k] * b[k * n + j];
}
c[i * n + j] = sum;
}
}
}
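/*
 * gemm_v1 below is the naive kernel: one thread per output element, each reading a full
 * row of A and a full column of B from global memory. Note it has no bounds check on
 * row/col, so it implicitly assumes n is a multiple of BLOCK_SIZE (true for the default
 * N = 1 << 10); a guard such as "if (row < n && col < n)" would be needed for arbitrary n.
 */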
__global__ void gemm_v1(float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int n)
{
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0;
for (int k = 0; k < n; ++k)
{
sum += a[row * n + k] * b[k * n + col];
}
c[row * n + col] = sum;
}
__device__ int get_offset(int idx_i, int idx_j, int n)
{
return idx_i * n * BLOCK_SIZE + idx_j * BLOCK_SIZE;
}
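/*
 * get_offset(i, j, n) is the linear index of the top-left element of the BLOCK_SIZE x
 * BLOCK_SIZE tile at block-row i, block-column j of a row-major n x n matrix. Worked
 * example (illustrative values only): with n = 128 and BLOCK_SIZE = 32,
 * get_offset(1, 2, 128) = 1*128*32 + 2*32 = 4160, i.e. the element at row 32, column 64.
 */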
__global__ void gemm_v2(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
//TODO Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//TODO Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
//TODO Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
//TODO Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
//TODO Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
//TODO Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset(ib, kb, n);
//TODO Get the starting address (b_offset) of Bsub
b_offset = get_offset(kb, jb, n);
//TODO Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
//TODO Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
//TODO Multiply As and Bs together
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Cvalue += As[it][k] * Bs[k][jt];
}
//TODO Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
c_offset = get_offset(ib, jb, n);
//TODO Each thread block computes one sub-matrix Csub of C
c[c_offset + it * n + jt] = Cvalue;
}
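/*
 * gemm_v2 above is the shared-memory tiled variant: each block stages one BLOCK_SIZE x
 * BLOCK_SIZE tile of A and one of B per iteration, so every element of A and B is fetched
 * from global memory BLOCK_SIZE times less often than in gemm_v1. gemm_v3 below is
 * structurally the same tiled kernel, presumably left as the starting point for a further
 * optimisation step.
 */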
__global__ void gemm_v3(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
//TODO Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//TODO Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
//TODO Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
//TODO Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
//TODO Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
//TODO Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset(ib, kb, n);
//TODO Get the starting address (b_offset) of Bsub
b_offset = get_offset(kb, jb, n); // Bsub is the tile at block-row kb, block-column jb, as in gemm_v2; the original (ib, kb) indexing appears to be a copy-paste slip
//TODO Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
//TODO Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
//TODO Multiply As and Bs together
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Cvalue += As[it][k] * Bs[k][jt];
}
//TODO Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
c_offset = get_offset(ib, jb, n);
//TODO Each thread block computes one sub-matrix Csub of C
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
free(a);
free(b);
free(c);
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
//CUDA Buffer Allocation
float *d_a, *d_b, *d_c;
gpuErrchk(hipMalloc((void **)&d_a, sizeof(float) * n * n));
gpuErrchk(hipMalloc((void **)&d_b, sizeof(float) * n * n));
gpuErrchk(hipMalloc((void **)&d_c, sizeof(float) * n * n));
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(hipMemcpy(d_a, a, sizeof(float) * n * n, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
hipLaunchKernelGGL(( gemm_v1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(c, d_c, sizeof(float) * n * n, hipMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(hipMemcpy(d_a, a, sizeof(float) * n * n, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice));
//dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
hipLaunchKernelGGL(( gemm_v2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(c, d_c, sizeof(float) * n * n, hipMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v2 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(hipMemcpy(d_a, a, sizeof(float) * n * n, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice));
//dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
hipLaunchKernelGGL(( gemm_v3), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(c, d_c, sizeof(float) * n * n, hipMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v3 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
free(a);
free(b);
free(c);
free(g);
gpuErrchk(hipFree(d_a));
gpuErrchk(hipFree(d_b));
gpuErrchk(hipFree(d_c));
return 0;
}
| 296d5ea02dc3b64a79d0a65e81edf6c7f2a997bc.cu | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise2.cu
* @author Alessandro Capotondi
* @date 5 May 2020
* @brief Exercise 3 - CUDA MATMUL Optimized
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
#pragma omp parallel for collapse(2)
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
{
float sum = 0.0;
for (int k = 0; k < n; ++k)
{
sum += a[i * n + k] * b[k * n + j];
}
c[i * n + j] = sum;
}
}
}
__global__ void gemm_v1(float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int n)
{
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0;
for (int k = 0; k < n; ++k)
{
sum += a[row * n + k] * b[k * n + col];
}
c[row * n + col] = sum;
}
__device__ int get_offset(int idx_i, int idx_j, int n)
{
return idx_i * n * BLOCK_SIZE + idx_j * BLOCK_SIZE;
}
__global__ void gemm_v2(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
//TODO Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//TODO Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
//TODO Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
//TODO Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
//TODO Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
//TODO Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset(ib, kb, n);
//TODO Get the starting address (b_offset) of Bsub
b_offset = get_offset(kb, jb, n);
//TODO Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
//TODO Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
//TODO Multiply As and Bs together
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Cvalue += As[it][k] * Bs[k][jt];
}
//TODO Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
c_offset = get_offset(ib, jb, n);
//TODO Each thread block computes one sub-matrix Csub of C
c[c_offset + it * n + jt] = Cvalue;
}
__global__ void gemm_v3(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
//TODO Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//TODO Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
//TODO Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
//TODO Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
//TODO Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
//TODO Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset(ib, kb, n);
//TODO Get the starting address (b_offset) of Bsub
b_offset = get_offset(kb, jb, n); // Bsub is the tile at block-row kb, block-column jb, as in gemm_v2; the original (ib, kb) indexing appears to be a copy-paste slip
//TODO Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
//TODO Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
//TODO Multiply As and Bs together
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Cvalue += As[it][k] * Bs[k][jt];
}
//TODO Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
c_offset = get_offset(ib, jb, n);
//TODO Each thread block computes one sub-matrix Csub of C
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
free(a);
free(b);
free(c);
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
//CUDA Buffer Allocation
float *d_a, *d_b, *d_c;
gpuErrchk(cudaMalloc((void **)&d_a, sizeof(float) * n * n));
gpuErrchk(cudaMalloc((void **)&d_b, sizeof(float) * n * n));
gpuErrchk(cudaMalloc((void **)&d_c, sizeof(float) * n * n));
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
gemm_v1<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
//dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
gemm_v2<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v2 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
//dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
gemm_v3<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v3 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
free(a);
free(b);
free(c);
free(g);
gpuErrchk(cudaFree(d_a));
gpuErrchk(cudaFree(d_b));
gpuErrchk(cudaFree(d_c));
return 0;
}
|
a299936687362bdf1500958c6f313ff87339376c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define __out
__device__ __constant__ float EPS2;
__device__ __constant__ float DT_TICK;
struct ds64
{
union
{
float2 val;
double dbl;
};
__device__ ds64() {}
__device__ ds64(float x) : val(make_float2(x, x)) {}
__device__ ds64 operator+=(const float x)
{
const float vx = val.x + x;
const float vy = val.y - ((vx - val.x) - x);
val = make_float2(vx, vy);
return *this;
}
__device__ double to_double() const { return (double)val.x + (double)val.y; }
};
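/*
 * ds64 is a float-float ("double-single") accumulator: val.x carries the running sum and
 * val.y accumulates the rounding error of each addition, so to_double() recovers noticeably
 * more precision than a plain float sum. This is the usual compensated-summation idea; the
 * exactness of the error term depends on operand magnitudes, so treat it as approximate.
 */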
template<class REAL>
struct cuvec3
{
REAL x, y, z;
__host__ __device__ cuvec3() {}
__host__ __device__ cuvec3(const REAL v) : x(v), y(v), z(v) {}
__host__ __device__ cuvec3(const REAL _x, const REAL _y, const REAL _z) : x(_x), y(_y), z(_z) {}
__host__ __device__ cuvec3 operator=(const cuvec3<float> v) {x = v.x; y = v.y; z = v.z; return *this;};
__host__ __device__ cuvec3 operator=(const cuvec3<double > v) {x = v.x; y = v.y; z = v.z; return *this;};
__host__ __device__ REAL operator*(const cuvec3<REAL> v) const {return (x*v.x + y*v.y + z*v.z);}
__host__ __device__ cuvec3 operator*(const REAL v) const {return cuvec3(x*v, y*v, z*v);}
// __host__ __device__ cuvec3 operator+(const cuvec3<REAL> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator-(const cuvec3<REAL> v) const {return cuvec3(x-v.x, y-v.y, z-v.z);}
__host__ __device__ cuvec3 operator%(const cuvec3<REAL> v) const {return cuvec3(x*v.y - y*v.x, y*v.z-z*v.y, z*v.x - x*v.z);} // note: returns the cross-product components in cyclically shifted order (c_z, c_x, c_y) rather than the conventional (c_x, c_y, c_z); unused in this file
__host__ __device__ cuvec3 operator-() const {return cuvec3(-x, -y, -z);}
__host__ __device__ cuvec3 operator+(const cuvec3<float> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator+(const cuvec3<double > v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator += (const cuvec3<REAL> v)
{
*this = *this + v;
return *this;
}
__host__ __device__ cuvec3 operator -= (const cuvec3<REAL> v)
{
*this = *this - v;
return *this;
}
__host__ __device__ cuvec3 operator *= (const REAL s)
{
*this = *this * s;
return *this;
}
__host__ __device__ friend cuvec3 operator * (const REAL s ,const cuvec3<REAL> v)
{
return v*s;
}
__host__ __device__ REAL norm2() const {return (*this)*(*this);};
};
typedef cuvec3<double > dcuvec3;
typedef cuvec3<float> fcuvec3;
__device__ float sqr(const float x)
{
return x*x;
}
/****************************/
/****************************/
/****************************/
template<class T>
struct ADDOP
{
__device__ static inline T identity() {return (T)(0);}
__device__ static inline T apply(T a, T b) {return (T)(a + b);};
__device__ static inline T unapply(T a, T b) {return (T)(a - b);};
__device__ static inline T mask(bool flag, T b) {return (T)(-(int)(flag) & b);};
};
template<class OP, class T>
__device__ __forceinline__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx )
{
const unsigned int lane = idx & 31;
if (lane >= 1) ptr[idx] = mysum = OP::apply(ptr[idx - 1], mysum);
if (lane >= 2) ptr[idx] = mysum = OP::apply(ptr[idx - 2], mysum);
if (lane >= 4) ptr[idx] = mysum = OP::apply(ptr[idx - 4], mysum);
if (lane >= 8) ptr[idx] = mysum = OP::apply(ptr[idx - 8], mysum);
if (lane >= 16) ptr[idx] = mysum = OP::apply(ptr[idx - 16], mysum);
return ptr[idx];
}
template<class OP, class T>
__device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx)
{
const unsigned int lane = idx & 31;
const unsigned int warpid = idx >> 5;
T mysum = ptr[idx];
__syncthreads();
// step 1: Intra-warp scan in each warp
T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
__syncthreads();
// step 2: Collect per-warp particle results
if (lane == 31) ptr[warpid] = ptr[idx];
__syncthreads();
mysum = ptr[idx];
// step 3: Use 1st warp to scan per-warp results
if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
__syncthreads();
// step 4: Accumulate results from Steps 1 and 3;
if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
__syncthreads();
// Step 5: Write and return the final result
ptr[idx] = val;
__syncthreads();
return val; //ptr[blockDim.x - 1];
}
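/*
 * Usage sketch (how this scan is used further down, not additional API): each thread writes
 * its per-thread count into a shared buffer, calls inclusive_scan_block, and then
 * ptr[idx] - count is that thread's exclusive offset while ptr[blockDim.x - 1] is the block
 * total - this is exactly how dev_reduce_regf derives the per-block neighbour offsets.
 */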
template<class OP, class T>
__device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx)
{
T y = OP::identity();
volatile T *ptr = ptr_global;
for (int p = 0; p < N; p += blockDim.x)
{
ptr = &ptr_global[p];
inclusive_scan_block<OP, T>(ptr, idx);
ptr[idx] = OP::apply(ptr[idx], y);
__syncthreads();
y = ptr[blockDim.x - 1];
__syncthreads();
}
return y;
}
/****************************/
/****************************/
/****************************/
struct dev_particle
{
dcuvec3 pos; // 6
fcuvec3 vel; // 9
fcuvec3 acc; // 12
fcuvec3 jrk; // 15
float mass; // 16
float h2; // 17
unsigned int time; // 18
int id; // 19
int iPad; // 20
int iPadX[12];
__host__ __device__ dev_particle() {}
__host__ dev_particle(const regf4::Particle&);
};
#define PTCL_LEN (sizeof(dev_particle) / sizeof(float4))
struct dev_predictor
{
fcuvec3 pos; // 3
fcuvec3 vel; // 6
union
{
float mass; // 7
float dt;
};
float h2; // 8
};
#define PRED_LEN (sizeof(dev_predictor) / sizeof(float4))
struct dev_force
{
ds64 accx, accy, accz; // 6
fcuvec3 jrk; // 9
float h2; // 10
int nngb; // 11
int iPad; // 12
__device__ dev_force() : accx(0.0f), accy(0.0f), accz(0.0f), jrk(0.0f), nngb(0) {}
};
/********************************/
/********************************/
/********************************/
__global__ void dev_predict_ptcl(
const int ni,
const unsigned int tsys,
const dev_particle *ptcl_in,
__out dev_predictor *pred_out,
__out float *dt_out)
{
const int id = blockIdx.x*blockDim.x + threadIdx.x;
const int addr = id < ni ? id : ni-1;
const dev_particle ip = ptcl_in[addr];
dev_predictor ipred;
const float dt = DT_TICK*(tsys - ip.time);
const float dt2 = dt*(1.0f/2.0f);
const float dt3 = dt*(1.0f/3.0f);
ipred.pos = ip.pos + dt*(ip.vel + dt2*(ip.acc + dt3*ip.jrk));
ipred.vel = ip.vel + dt*(ip.acc + dt2* ip.jrk);
ipred.mass = ip.mass;
ipred.h2 = ip.h2;
if (id < ni)
{
pred_out[addr] = ipred;
dt_out [addr] = dt;
}
}
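/*
 * dev_predict_ptcl advances every particle to the shared system time tsys with a Taylor
 * predictor: pos += dt*vel + dt^2/2*acc + dt^3/6*jrk and vel += dt*acc + dt^2/2*jrk, i.e.
 * the prediction half of a Hermite-style integrator. DT_TICK converts the integer tick
 * difference (tsys - time) into physical time.
 */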
/********************************/
/********************************/
/********************************/
template<int NGB_PER_BLOCK>
__forceinline__ __device__ dev_force dev_regfij(
const unsigned int jidx,
const dev_predictor pi,
const dev_predictor pj,
__out dev_force fi,
__out unsigned int *ngb_list)
{
const fcuvec3 dr = pj.pos - pi.pos;
const fcuvec3 dv = pj.vel - pi.vel;
const float r2 = dr*dr;
const float r2p = fminf(r2, (dr + pi.dt*dv).norm2());
if (r2p < pi.h2)
{
if (pj.mass > 0.0f)
{
ngb_list[fi.nngb & (NGB_PER_BLOCK-1)] = jidx;
fi.nngb += (r2 > 0.0f);
}
}
else
{
const float rv = dr*dv;
const float rinv1 = rsqrt(r2 + EPS2);
const float rinv2 = rinv1*rinv1;
const float rinv3 = pj.mass*(rinv1*rinv2);
const float alpha = rv*rinv2;
const fcuvec3 Aij = rinv3*dr;
const fcuvec3 Jij = rinv3*dv - Aij*(3.0f*alpha);
fi.accx += Aij.x;
fi.accy += Aij.y;
fi.accz += Aij.z;
fi.jrk += Jij;
}
return fi;
}
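/*
 * Note on the neighbour list above: indices are written at position (nngb & (NGB_PER_BLOCK-1)),
 * so the list behaves as a ring buffer and silently wraps once more than NGB_PER_BLOCK
 * neighbours are found; the mask is only valid when NGB_PER_BLOCK is a power of two, which
 * the template instantiation is assumed to guarantee.
 */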
/********************************/
template<int NTHREAD, int NJBLOCK, int NJBLOCK2, int NGB_PER_BLOCK>
__global__ void
#if 0
__launch_bounds__ (NTHREAD, 1)
#endif
dev_regf(
const int ni,
const int nj_per_block,
const int *active_list,
const dev_predictor *pred_in,
const float *dt_in,
__out dev_force *force_out,
__out unsigned int *ngb_out)
{
__shared__ dev_predictor jpshared[NTHREAD];
// compute iblock & jblock offset
const int iblock = blockIdx.x*NTHREAD;
const int jblock = blockIdx.y;
const int tid = threadIdx.x;
// read i-particle into registers
const int idx = iblock + tid;
const int addr = active_list[idx < ni ? idx : ni - 1];
dev_predictor ipred = pred_in[addr];
ipred.dt = dt_in[addr];
// initialize i-particle's force
dev_force iforce;
// obtain beginning & end of j particles for this block
const int jbeg = jblock*nj_per_block;
const int jend = jbeg + nj_per_block;
unsigned int *ingb_ptr = ngb_out + NGB_PER_BLOCK*(jblock + NJBLOCK*idx);
for (int j = jbeg; j < jend; j += NTHREAD)
{
#if 0
jpshared[tid] = pred_in[j + tid];
#else
float4 *src = (float4*)&pred_in[j];
float4 *dst = (float4*)jpshared;
#pragma unroll
for (int it = 0; it < PRED_LEN; it++)
{
dst[tid] = src[tid];
dst += NTHREAD;
src += NTHREAD;
}
#endif
__syncthreads();
if (idx < ni)
{
#pragma unroll 8
for (int jj = 0; jj < NTHREAD; jj++)
iforce = dev_regfij<NGB_PER_BLOCK>(j+jj, ipred, jpshared[jj], iforce, ingb_ptr);
}
__syncthreads();
}
if (idx < ni)
{
iforce.h2 = ipred.h2;
force_out[jblock + idx*NJBLOCK2] = iforce;
}
}
/********************************/
/********************************/
/********************************/
template<class OP, class T, int NTHREAD>
__device__ T reduce_block(volatile T *ptr, T mySum, const unsigned int tid)
{
ptr[tid] = mySum;
__syncthreads();
if (NTHREAD >= 512) { if (tid < 256) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+256]); } __syncthreads(); }
if (NTHREAD >= 256) { if (tid < 128) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+128]); } __syncthreads(); }
if (NTHREAD >= 128) { if (tid < 64) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 64]); } __syncthreads(); }
if (tid < 32)
{
if (NTHREAD >= 64) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+32]);
if (NTHREAD >= 32) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+16]);
if (NTHREAD >= 16) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 8]);
if (NTHREAD >= 8) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 4]);
if (NTHREAD >= 4) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 2]);
if (NTHREAD >= 2) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 1]);
}
__syncthreads();
return ptr[0];
}
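/*
 * The tid < 32 tail of reduce_block relies on implicit warp-synchronous execution over a
 * volatile shared array, a common pre-Volta idiom. On CUDA architectures with independent
 * thread scheduling this is no longer guaranteed, and an explicit warp synchronization
 * (e.g. __syncwarp() on CUDA targets) between the steps would be the safer modern equivalent.
 */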
// here each particle is assigned to a single block...
// for 60 active blocks in dev_regf, and 64 threads the max efficiency is 60/64...
template<int NTHREAD, int NJBLOCK, int NJBLOCK2>
__global__ void dev_reduce_regf(
const dev_force *force_in,
__out int2 *ngb_offset,
__out dev_force *force_out)
{
// we use parallel prefix sum to obtain reduce forces
const int idx = blockIdx.x; // body id
const int tid = threadIdx.x; // block id
__shared__ float shdata[2*NTHREAD];
double *shdbl = (double*)shdata;
dev_force iforce;
if (tid < NJBLOCK)
iforce = force_in[tid + idx*NJBLOCK2];
iforce.accx.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accx.to_double(), tid);
iforce.accy.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accy.to_double(), tid);
iforce.accz.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accz.to_double(), tid);
iforce.jrk.x = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.x, tid);
iforce.jrk.y = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.y, tid);
iforce.jrk.z = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.z, tid);
int *shint = (int*)shdata;
shint[tid] = iforce.nngb;
inclusive_scan_block<ADDOP<int>, int>(shint, tid);
const int nngb = shint[NTHREAD-1];
/* #ngb in a block, memory offset */
#if 0
if (idx == 0)
{
for (int t = 0; t < NTHREAD; t++)
{
__syncthreads();
if (t == tid)
printf(" nnbb= %d offset= %d addr= %d tid= %d NJBLOCK= %d\n",
iforce.nngb, shint[tid] - iforce.nngb,
idx + tid*NJBLOCK, tid, NJBLOCK);
}
}
#endif
if (tid < NJBLOCK)
ngb_offset[tid + idx*NJBLOCK] = (int2){iforce.nngb, shint[tid] - iforce.nngb};
if (tid == 0)
{
iforce.nngb = nngb;
force_out[idx] = iforce;
}
}
/********************************/
template<int NTHREAD, int NJBLOCK, int NGB_PER_BLOCK, int NGB_MAX>
__global__ void dev_reduce_ngb(
const int2 *ngb_offset,
const unsigned int *ngb_in,
__out unsigned int *ngb_out
)
{
const int idx = blockIdx.x; // body id
const int tid = threadIdx.x;
for (int i = 0; i < NJBLOCK; i++)
{
const int2 ingb = ngb_offset[i + idx*NJBLOCK];
const int nngb = ingb.x;
const int offset = ingb.y;
if (tid < nngb)
{
#if 0
if (idx == 0)
{
for (int t = 0; t < NTHREAD; t++)
{
__syncthreads();
if (tid == t)
printf("block= %d tid= %d: addr= %d offset= %d nngb= %d newx= %d\n", i, tid, idx*NGB_MAX+offset+tid, offset, nngb ,offset+nngb);
}
}
#endif
const int offset_tot = min(offset+tid, NGB_MAX-1);
ngb_out[idx*NGB_MAX+offset_tot] = ngb_in[NGB_PER_BLOCK*(i + NJBLOCK*idx)+tid];
}
}
}
/********************************/
__global__ void dev_move_particles(
const int nj,
const int *addr_in,
const dev_particle *ptcl_in,
__out dev_particle *ptcl_out)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nj) return;
const int addr = addr_in[idx];
ptcl_out[addr] = ptcl_in[idx];
}
/********************************/
struct gpot_struct
{
dcuvec3 pos;
float mass;
};
template<int BLOCKSIZE>
__global__ void dev_compute_potential(
const int ni,
const int nj,
const dev_particle *ptcl_in,
__out float *gpot_out)
{
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
const int addr = idx < ni ? idx : ni - 1;
const int tid = threadIdx.x;
__shared__ gpot_struct shmem[BLOCKSIZE];
ds64 gpot(0.0f);
const dcuvec3 ipos = ptcl_in[addr].pos;
for (int j = 0; j < nj; j += BLOCKSIZE)
{
dev_particle pj = ptcl_in[j+tid];
shmem[tid].pos = pj.pos;
shmem[tid].mass = pj.mass;
__syncthreads();
#pragma unroll
for (int jj = 0; jj < BLOCKSIZE; jj++)
{
const dcuvec3 jpos = shmem[jj].pos;
const float jmass = shmem[jj].mass;
const fcuvec3 dr = fcuvec3(jpos.x - ipos.x, jpos.y - ipos.y, jpos.z - ipos.z);
const float r2 = dr*dr;
const float rinv = (r2 > 0.0f) ? rsqrt(r2 + EPS2) : 0.0f;
gpot += jmass * rinv;
}
__syncthreads();
}
if (idx < ni)
gpot_out[idx] = -gpot.to_double();
}
| a299936687362bdf1500958c6f313ff87339376c.cu | #define __out
__device__ __constant__ float EPS2;
__device__ __constant__ float DT_TICK;
struct ds64
{
union
{
float2 val;
double dbl;
};
__device__ ds64() {}
__device__ ds64(float x) : val(make_float2(x, x)) {}
__device__ ds64 operator+=(const float x)
{
const float vx = val.x + x;
const float vy = val.y - ((vx - val.x) - x);
val = make_float2(vx, vy);
return *this;
}
__device__ double to_double() const { return (double)val.x + (double)val.y; }
};
template<class REAL>
struct cuvec3
{
REAL x, y, z;
__host__ __device__ cuvec3() {}
__host__ __device__ cuvec3(const REAL v) : x(v), y(v), z(v) {}
__host__ __device__ cuvec3(const REAL _x, const REAL _y, const REAL _z) : x(_x), y(_y), z(_z) {}
__host__ __device__ cuvec3 operator=(const cuvec3<float> v) {x = v.x; y = v.y; z = v.z; return *this;};
__host__ __device__ cuvec3 operator=(const cuvec3<double > v) {x = v.x; y = v.y; z = v.z; return *this;};
__host__ __device__ REAL operator*(const cuvec3<REAL> v) const {return (x*v.x + y*v.y + z*v.z);}
__host__ __device__ cuvec3 operator*(const REAL v) const {return cuvec3(x*v, y*v, z*v);}
// __host__ __device__ cuvec3 operator+(const cuvec3<REAL> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator-(const cuvec3<REAL> v) const {return cuvec3(x-v.x, y-v.y, z-v.z);}
__host__ __device__ cuvec3 operator%(const cuvec3<REAL> v) const {return cuvec3(x*v.y - y*v.x, y*v.z-z*v.y, z*v.x - x*v.z);}
__host__ __device__ cuvec3 operator-() const {return cuvec3(-x, -y, -z);}
__host__ __device__ cuvec3 operator+(const cuvec3<float> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator+(const cuvec3<double > v) const {return cuvec3(x+v.x, y+v.y, z+v.z);}
__host__ __device__ cuvec3 operator += (const cuvec3<REAL> v)
{
*this = *this + v;
return *this;
}
__host__ __device__ cuvec3 operator -= (const cuvec3<REAL> v)
{
*this = *this - v;
return *this;
}
__host__ __device__ cuvec3 operator *= (const REAL s)
{
*this = *this * s;
return *this;
}
__host__ __device__ friend cuvec3 operator * (const REAL s ,const cuvec3<REAL> v)
{
return v*s;
}
__host__ __device__ REAL norm2() const {return (*this)*(*this);};
};
typedef cuvec3<double > dcuvec3;
typedef cuvec3<float> fcuvec3;
__device__ float sqr(const float x)
{
return x*x;
}
/****************************/
/****************************/
/****************************/
template<class T>
struct ADDOP
{
__device__ static inline T identity() {return (T)(0);}
__device__ static inline T apply(T a, T b) {return (T)(a + b);};
__device__ static inline T unapply(T a, T b) {return (T)(a - b);};
__device__ static inline T mask(bool flag, T b) {return (T)(-(int)(flag) & b);};
};
template<class OP, class T>
__device__ __forceinline__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx )
{
const unsigned int lane = idx & 31;
if (lane >= 1) ptr[idx] = mysum = OP::apply(ptr[idx - 1], mysum);
if (lane >= 2) ptr[idx] = mysum = OP::apply(ptr[idx - 2], mysum);
if (lane >= 4) ptr[idx] = mysum = OP::apply(ptr[idx - 4], mysum);
if (lane >= 8) ptr[idx] = mysum = OP::apply(ptr[idx - 8], mysum);
if (lane >= 16) ptr[idx] = mysum = OP::apply(ptr[idx - 16], mysum);
return ptr[idx];
}
template<class OP, class T>
__device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx)
{
const unsigned int lane = idx & 31;
const unsigned int warpid = idx >> 5;
T mysum = ptr[idx];
__syncthreads();
// step 1: Intra-warp scan in each warp
T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
__syncthreads();
// step 2: Collect per-warp particle results
if (lane == 31) ptr[warpid] = ptr[idx];
__syncthreads();
mysum = ptr[idx];
// step 3: Use 1st warp to scan per-warp results
if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
__syncthreads();
// step 4: Accumulate results from Steps 1 and 3;
if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
__syncthreads();
// Step 5: Write and return the final result
ptr[idx] = val;
__syncthreads();
return val; //ptr[blockDim.x - 1];
}
template<class OP, class T>
__device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx)
{
T y = OP::identity();
volatile T *ptr = ptr_global;
for (int p = 0; p < N; p += blockDim.x)
{
ptr = &ptr_global[p];
inclusive_scan_block<OP, T>(ptr, idx);
ptr[idx] = OP::apply(ptr[idx], y);
__syncthreads();
y = ptr[blockDim.x - 1];
__syncthreads();
}
return y;
}
/****************************/
/****************************/
/****************************/
struct dev_particle
{
dcuvec3 pos; // 6
fcuvec3 vel; // 9
fcuvec3 acc; // 12
fcuvec3 jrk; // 15
float mass; // 16
float h2; // 17
unsigned int time; // 18
int id; // 19
int iPad; // 20
int iPadX[12];
__host__ __device__ dev_particle() {}
__host__ dev_particle(const regf4::Particle&);
};
#define PTCL_LEN (sizeof(dev_particle) / sizeof(float4))
struct dev_predictor
{
fcuvec3 pos; // 3
fcuvec3 vel; // 6
union
{
float mass; // 7
float dt;
};
float h2; // 8
};
#define PRED_LEN (sizeof(dev_predictor) / sizeof(float4))
struct dev_force
{
ds64 accx, accy, accz; // 6
fcuvec3 jrk; // 9
float h2; // 10
int nngb; // 11
int iPad; // 12
__device__ dev_force() : accx(0.0f), accy(0.0f), accz(0.0f), jrk(0.0f), nngb(0) {}
};
/********************************/
/********************************/
/********************************/
__global__ void dev_predict_ptcl(
const int ni,
const unsigned int tsys,
const dev_particle *ptcl_in,
__out dev_predictor *pred_out,
__out float *dt_out)
{
const int id = blockIdx.x*blockDim.x + threadIdx.x;
const int addr = id < ni ? id : ni-1;
const dev_particle ip = ptcl_in[addr];
dev_predictor ipred;
const float dt = DT_TICK*(tsys - ip.time);
const float dt2 = dt*(1.0f/2.0f);
const float dt3 = dt*(1.0f/3.0f);
ipred.pos = ip.pos + dt*(ip.vel + dt2*(ip.acc + dt3*ip.jrk));
ipred.vel = ip.vel + dt*(ip.acc + dt2* ip.jrk);
ipred.mass = ip.mass;
ipred.h2 = ip.h2;
if (id < ni)
{
pred_out[addr] = ipred;
dt_out [addr] = dt;
}
}
/********************************/
/********************************/
/********************************/
template<int NGB_PER_BLOCK>
__forceinline__ __device__ dev_force dev_regfij(
const unsigned int jidx,
const dev_predictor pi,
const dev_predictor pj,
__out dev_force fi,
__out unsigned int *ngb_list)
{
const fcuvec3 dr = pj.pos - pi.pos;
const fcuvec3 dv = pj.vel - pi.vel;
const float r2 = dr*dr;
const float r2p = fminf(r2, (dr + pi.dt*dv).norm2());
if (r2p < pi.h2)
{
if (pj.mass > 0.0f)
{
ngb_list[fi.nngb & (NGB_PER_BLOCK-1)] = jidx;
fi.nngb += (r2 > 0.0f);
}
}
else
{
const float rv = dr*dv;
const float rinv1 = rsqrt(r2 + EPS2);
const float rinv2 = rinv1*rinv1;
const float rinv3 = pj.mass*(rinv1*rinv2);
const float alpha = rv*rinv2;
const fcuvec3 Aij = rinv3*dr;
const fcuvec3 Jij = rinv3*dv - Aij*(3.0f*alpha);
fi.accx += Aij.x;
fi.accy += Aij.y;
fi.accz += Aij.z;
fi.jrk += Jij;
}
return fi;
}
/********************************/
template<int NTHREAD, int NJBLOCK, int NJBLOCK2, int NGB_PER_BLOCK>
__global__ void
#if 0
__launch_bounds__ (NTHREAD, 1)
#endif
dev_regf(
const int ni,
const int nj_per_block,
const int *active_list,
const dev_predictor *pred_in,
const float *dt_in,
__out dev_force *force_out,
__out unsigned int *ngb_out)
{
__shared__ dev_predictor jpshared[NTHREAD];
// compute iblock & jblock offset
const int iblock = blockIdx.x*NTHREAD;
const int jblock = blockIdx.y;
const int tid = threadIdx.x;
// read i-particle into registers
const int idx = iblock + tid;
const int addr = active_list[idx < ni ? idx : ni - 1];
dev_predictor ipred = pred_in[addr];
ipred.dt = dt_in[addr];
// initialize i-particle's force
dev_force iforce;
// obtain beginning & end of j particles for this block
const int jbeg = jblock*nj_per_block;
const int jend = jbeg + nj_per_block;
unsigned int *ingb_ptr = ngb_out + NGB_PER_BLOCK*(jblock + NJBLOCK*idx);
for (int j = jbeg; j < jend; j += NTHREAD)
{
#if 0
jpshared[tid] = pred_in[j + tid];
#else
float4 *src = (float4*)&pred_in[j];
float4 *dst = (float4*)jpshared;
#pragma unroll
for (int it = 0; it < PRED_LEN; it++)
{
dst[tid] = src[tid];
dst += NTHREAD;
src += NTHREAD;
}
#endif
__syncthreads();
if (idx < ni)
{
#pragma unroll 8
for (int jj = 0; jj < NTHREAD; jj++)
iforce = dev_regfij<NGB_PER_BLOCK>(j+jj, ipred, jpshared[jj], iforce, ingb_ptr);
}
__syncthreads();
}
if (idx < ni)
{
iforce.h2 = ipred.h2;
force_out[jblock + idx*NJBLOCK2] = iforce;
}
}
/********************************/
/********************************/
/********************************/
template<class OP, class T, int NTHREAD>
__device__ T reduce_block(volatile T *ptr, T mySum, const unsigned int tid)
{
ptr[tid] = mySum;
__syncthreads();
if (NTHREAD >= 512) { if (tid < 256) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+256]); } __syncthreads(); }
if (NTHREAD >= 256) { if (tid < 128) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+128]); } __syncthreads(); }
if (NTHREAD >= 128) { if (tid < 64) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 64]); } __syncthreads(); }
if (tid < 32)
{
if (NTHREAD >= 64) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+32]);
if (NTHREAD >= 32) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+16]);
if (NTHREAD >= 16) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 8]);
if (NTHREAD >= 8) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 4]);
if (NTHREAD >= 4) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 2]);
if (NTHREAD >= 2) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 1]);
}
__syncthreads();
return ptr[0];
}
// here each particle is assigned to a single block...
// for 60 active blocks in dev_regf, and 64 threads the max efficiency is 60/64...
template<int NTHREAD, int NJBLOCK, int NJBLOCK2>
__global__ void dev_reduce_regf(
const dev_force *force_in,
__out int2 *ngb_offset,
__out dev_force *force_out)
{
// we use parallel prefix sum to obtain reduce forces
const int idx = blockIdx.x; // body id
const int tid = threadIdx.x; // block id
__shared__ float shdata[2*NTHREAD];
double *shdbl = (double*)shdata;
dev_force iforce;
if (tid < NJBLOCK)
iforce = force_in[tid + idx*NJBLOCK2];
iforce.accx.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accx.to_double(), tid);
iforce.accy.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accy.to_double(), tid);
iforce.accz.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accz.to_double(), tid);
iforce.jrk.x = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.x, tid);
iforce.jrk.y = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.y, tid);
iforce.jrk.z = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.z, tid);
int *shint = (int*)shdata;
shint[tid] = iforce.nngb;
inclusive_scan_block<ADDOP<int>, int>(shint, tid);
const int nngb = shint[NTHREAD-1];
/* #ngb in a block, memory offset */
#if 0
if (idx == 0)
{
for (int t = 0; t < NTHREAD; t++)
{
__syncthreads();
if (t == tid)
printf(" nnbb= %d offset= %d addr= %d tid= %d NJBLOCK= %d\n",
iforce.nngb, shint[tid] - iforce.nngb,
idx + tid*NJBLOCK, tid, NJBLOCK);
}
}
#endif
if (tid < NJBLOCK)
ngb_offset[tid + idx*NJBLOCK] = (int2){iforce.nngb, shint[tid] - iforce.nngb};
if (tid == 0)
{
iforce.nngb = nngb;
force_out[idx] = iforce;
}
}
/********************************/
template<int NTHREAD, int NJBLOCK, int NGB_PER_BLOCK, int NGB_MAX>
__global__ void dev_reduce_ngb(
const int2 *ngb_offset,
const unsigned int *ngb_in,
__out unsigned int *ngb_out
)
{
const int idx = blockIdx.x; // body id
const int tid = threadIdx.x;
for (int i = 0; i < NJBLOCK; i++)
{
const int2 ingb = ngb_offset[i + idx*NJBLOCK];
const int nngb = ingb.x;
const int offset = ingb.y;
if (tid < nngb)
{
#if 0
if (idx == 0)
{
for (int t = 0; t < NTHREAD; t++)
{
__syncthreads();
if (tid == t)
printf("block= %d tid= %d: addr= %d offset= %d nngb= %d newx= %d\n", i, tid, idx*NGB_MAX+offset+tid, offset, nngb ,offset+nngb);
}
}
#endif
const int offset_tot = min(offset+tid, NGB_MAX-1);
ngb_out[idx*NGB_MAX+offset_tot] = ngb_in[NGB_PER_BLOCK*(i + NJBLOCK*idx)+tid];
}
}
}
/********************************/
__global__ void dev_move_particles(
const int nj,
const int *addr_in,
const dev_particle *ptcl_in,
__out dev_particle *ptcl_out)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nj) return;
const int addr = addr_in[idx];
ptcl_out[addr] = ptcl_in[idx];
}
/********************************/
struct gpot_struct
{
dcuvec3 pos;
float mass;
};
template<int BLOCKSIZE>
__global__ void dev_compute_potential(
const int ni,
const int nj,
const dev_particle *ptcl_in,
__out float *gpot_out)
{
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
const int addr = idx < ni ? idx : ni - 1;
const int tid = threadIdx.x;
__shared__ gpot_struct shmem[BLOCKSIZE];
ds64 gpot(0.0f);
const dcuvec3 ipos = ptcl_in[addr].pos;
for (int j = 0; j < nj; j += BLOCKSIZE)
{
dev_particle pj = ptcl_in[j+tid];
shmem[tid].pos = pj.pos;
shmem[tid].mass = pj.mass;
__syncthreads();
#pragma unroll
for (int jj = 0; jj < BLOCKSIZE; jj++)
{
const dcuvec3 jpos = shmem[jj].pos;
const float jmass = shmem[jj].mass;
const fcuvec3 dr = fcuvec3(jpos.x - ipos.x, jpos.y - ipos.y, jpos.z - ipos.z);
const float r2 = dr*dr;
const float rinv = (r2 > 0.0f) ? rsqrt(r2 + EPS2) : 0.0f;
gpot += jmass * rinv;
}
__syncthreads();
}
if (idx < ni)
gpot_out[idx] = -gpot.to_double();
}
|
28b30f38e5b59f0367154b02a97ca5941c3718b4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* FLAME GPU v 1.5.X for CUDA 9
* Copyright University of Sheffield.
* Original Author: Dr Paul Richmond (user contributions tracked on https://github.com/FLAMEGPU/FLAMEGPU)
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
//Disable internal thrust warnings about conversions
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning (disable : 4267)
#pragma warning (disable : 4244)
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// includes
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/system/hip/execution_policy.h>
#include <hipcub/hipcub.hpp>
// include FLAME kernels
#include "FLAMEGPU_kernals.cu"
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
/* Error check function for safe CUDA API calling */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Error check function for post CUDA Kernel calling */
#define gpuErrchkLaunch() { gpuLaunchAssert(__FILE__, __LINE__); }
inline void gpuLaunchAssert(const char *file, int line, bool abort=true)
{
gpuAssert( hipPeekAtLastError(), file, line );
#ifdef _DEBUG
gpuAssert( hipDeviceSynchronize(), file, line );
#endif
}
/* SM padding and offset variables */
int SM_START;
int PADDING;
unsigned int g_iterationNumber;
/* Agent Memory */
/* A Agent variables these lists are used in the agent function where as the other lists are used only outside the agent functions*/
xmachine_memory_A_list* d_As; /**< Pointer to agent list (population) on the device*/
xmachine_memory_A_list* d_As_swap; /**< Pointer to agent list swap on the device (used when killing agents)*/
xmachine_memory_A_list* d_As_new; /**< Pointer to new agent list on the device (used to hold new agents before they are appended to the population)*/
int h_xmachine_memory_A_count; /**< Agent population size counter */
uint * d_xmachine_memory_A_keys; /**< Agent sort identifiers keys*/
uint * d_xmachine_memory_A_values; /**< Agent sort identifiers value */
/* A state variables */
xmachine_memory_A_list* h_As_moving; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_moving; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_moving_count; /**< Agent population size counter */
/* A state variables */
xmachine_memory_A_list* h_As_change_direction; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_change_direction; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_change_direction_count; /**< Agent population size counter */
/* A state variables */
xmachine_memory_A_list* h_As_get_going_again; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_get_going_again; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_get_going_again_count; /**< Agent population size counter */
/* Variables to track the state of host copies of state lists, for the purposes of host agent data access.
* @future - if the host data is current it may be possible to avoid duplicating memcpy in xml output.
*/
unsigned int h_As_moving_variable_id_data_iteration;
unsigned int h_As_moving_variable_x_data_iteration;
unsigned int h_As_moving_variable_y_data_iteration;
unsigned int h_As_moving_variable_z_data_iteration;
unsigned int h_As_moving_variable_fx_data_iteration;
unsigned int h_As_moving_variable_fy_data_iteration;
unsigned int h_As_moving_variable_fz_data_iteration;
unsigned int h_As_change_direction_variable_id_data_iteration;
unsigned int h_As_change_direction_variable_x_data_iteration;
unsigned int h_As_change_direction_variable_y_data_iteration;
unsigned int h_As_change_direction_variable_z_data_iteration;
unsigned int h_As_change_direction_variable_fx_data_iteration;
unsigned int h_As_change_direction_variable_fy_data_iteration;
unsigned int h_As_change_direction_variable_fz_data_iteration;
unsigned int h_As_get_going_again_variable_id_data_iteration;
unsigned int h_As_get_going_again_variable_x_data_iteration;
unsigned int h_As_get_going_again_variable_y_data_iteration;
unsigned int h_As_get_going_again_variable_z_data_iteration;
unsigned int h_As_get_going_again_variable_fx_data_iteration;
unsigned int h_As_get_going_again_variable_fy_data_iteration;
unsigned int h_As_get_going_again_variable_fz_data_iteration;
/* Message Memory */
/* location Message variables */
xmachine_message_location_list* h_locations; /**< Pointer to message list on host*/
xmachine_message_location_list* d_locations; /**< Pointer to message list on device*/
xmachine_message_location_list* d_locations_swap; /**< Pointer to message swap list on device (used for holding optional messages)*/
/* Non partitioned and spatial partitioned message variables */
int h_message_location_count; /**< message list counter*/
int h_message_location_output_type; /**< message output type (single or optional)*/
/* CUDA Streams for function layers */
hipStream_t stream1;
/* Device memory and sizes for CUB values */
void * d_temp_scan_storage_A;
size_t temp_scan_storage_bytes_A;
/*Global condition counts*/
/* RNG rand48 */
RNG_rand48* h_rand48; /**< Pointer to RNG_rand48 seed list on host*/
RNG_rand48* d_rand48; /**< Pointer to RNG_rand48 seed list on device*/
/* Early simulation exit*/
bool g_exit_early;
/* Cuda Event Timers for Instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
hipEvent_t instrument_iteration_start, instrument_iteration_stop;
float instrument_iteration_milliseconds = 0.0f;
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
hipEvent_t instrument_start, instrument_stop;
float instrument_milliseconds = 0.0f;
#endif
/* CUDA Parallel Primatives variables */
int scan_last_sum; /**< Indicates if the position (in message list) of last message*/
int scan_last_included; /**< Indicates if last sum value is included in the total sum count*/
/* Agent function prototypes */
/** A_move
* Agent function prototype for move function of A agent
*/
void A_move(hipStream_t &stream);
/** A_reverse_direction
* Agent function prototype for reverse_direction function of A agent
*/
void A_reverse_direction(hipStream_t &stream);
/** A_resume_movement
* Agent function prototype for resume_movement function of A agent
*/
void A_resume_movement(hipStream_t &stream);
void setPaddingAndOffset()
{
PROFILE_SCOPED_RANGE("setPaddingAndOffset");
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int x64_sys = 0;
// This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
printf("Error: There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
//check if double is used and supported
#ifdef _DOUBLE_SUPPORT_REQUIRED_
printf("Simulation requires full precision double values\n");
if ((deviceProp.major < 2)&&(deviceProp.minor < 3)){
printf("Error: Hardware does not support full precision double values!\n");
exit(EXIT_FAILURE);
}
#endif
//check 32 or 64bit
x64_sys = (sizeof(void*)==8);
if (x64_sys)
{
printf("64Bit System Detected\n");
}
else
{
printf("32Bit System Detected\n");
}
SM_START = 0;
PADDING = 0;
//copy padding and offset to GPU
gpuErrchk(hipMemcpyToSymbol( d_SM_START, &SM_START, sizeof(int)));
gpuErrchk(hipMemcpyToSymbol( d_PADDING, &PADDING, sizeof(int)));
}
int is_sqr_pow2(int x){
int r = (int)pow(4, ceil(log(x)/log(4)));
return (r == x);
}
int lowest_sqr_pow2(int x){
int l;
//escape early if x is square power of 2
if (is_sqr_pow2(x))
return x;
//lower bound
l = (int)pow(4, floor(log(x)/log(4)));
return l;
}
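/*
 * Worked example (illustrative): is_sqr_pow2(64) is true because 4^ceil(log4(64)) == 64,
 * while lowest_sqr_pow2(50) returns 4^floor(log4(50)) = 16, the largest power of four that
 * does not exceed 50.
 */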
/* Unary function required for hipOccupancyMaxPotentialBlockSizeVariableSMem to avoid warnings */
int no_sm(int b){
return 0;
}
/* Unary function to return shared memory size for reorder message kernels */
int reorder_messages_sm_size(int blockSize)
{
return sizeof(unsigned int)*(blockSize+1);
}
/** getIterationNumber
* Get the iteration number (host)
* @return a 1 indexed value for the iteration number, which is incremented at the start of each simulation step.
* I.e. it is 0 up until the first call to singleIteration()
*/
extern unsigned int getIterationNumber(){
return g_iterationNumber;
}
void initialise(char * inputfile){
PROFILE_SCOPED_RANGE("initialise");
//set the padding and offset values depending on architecture and OS
setPaddingAndOffset();
// Initialise some global variables
g_iterationNumber = 0;
g_exit_early = false;
// Initialise variables for tracking which iterations' data is accessible on the host.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
printf("Allocating Host and Device memory\n");
PROFILE_PUSH_RANGE("allocate host");
/* Agent memory allocation (CPU) */
int xmachine_A_SoA_size = sizeof(xmachine_memory_A_list);
h_As_moving = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
h_As_change_direction = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
h_As_get_going_again = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
/* Message memory allocation (CPU) */
int message_location_SoA_size = sizeof(xmachine_message_location_list);
h_locations = (xmachine_message_location_list*)malloc(message_location_SoA_size);
//Exit if agent or message buffer sizes are too small for function outputs
/* Graph memory allocation (CPU) */
PROFILE_POP_RANGE(); //"allocate host"
//read initial states
readInitialStates(inputfile, h_As_moving, &h_xmachine_memory_A_moving_count);
// Read graphs from disk
PROFILE_PUSH_RANGE("allocate device");
/* A Agent memory allocation (GPU) */
gpuErrchk( hipMalloc( (void**) &d_As, xmachine_A_SoA_size));
gpuErrchk( hipMalloc( (void**) &d_As_swap, xmachine_A_SoA_size));
gpuErrchk( hipMalloc( (void**) &d_As_new, xmachine_A_SoA_size));
//continuous agent sort identifiers
gpuErrchk( hipMalloc( (void**) &d_xmachine_memory_A_keys, xmachine_memory_A_MAX* sizeof(uint)));
gpuErrchk( hipMalloc( (void**) &d_xmachine_memory_A_values, xmachine_memory_A_MAX* sizeof(uint)));
/* moving memory allocation (GPU) */
gpuErrchk( hipMalloc( (void**) &d_As_moving, xmachine_A_SoA_size));
gpuErrchk( hipMemcpy( d_As_moving, h_As_moving, xmachine_A_SoA_size, hipMemcpyHostToDevice));
/* change_direction memory allocation (GPU) */
gpuErrchk( hipMalloc( (void**) &d_As_change_direction, xmachine_A_SoA_size));
gpuErrchk( hipMemcpy( d_As_change_direction, h_As_change_direction, xmachine_A_SoA_size, hipMemcpyHostToDevice));
/* get_going_again memory allocation (GPU) */
gpuErrchk( hipMalloc( (void**) &d_As_get_going_again, xmachine_A_SoA_size));
gpuErrchk( hipMemcpy( d_As_get_going_again, h_As_get_going_again, xmachine_A_SoA_size, hipMemcpyHostToDevice));
/* location Message memory allocation (GPU) */
gpuErrchk( hipMalloc( (void**) &d_locations, message_location_SoA_size));
gpuErrchk( hipMalloc( (void**) &d_locations_swap, message_location_SoA_size));
gpuErrchk( hipMemcpy( d_locations, h_locations, message_location_SoA_size, hipMemcpyHostToDevice));
/* Allocate device memory for graphs */
PROFILE_POP_RANGE(); // "allocate device"
/* Calculate and allocate CUB temporary memory for exclusive scans */
d_temp_scan_storage_A = nullptr;
temp_scan_storage_bytes_A = 0;
hipcub::DeviceScan::ExclusiveSum(
d_temp_scan_storage_A,
temp_scan_storage_bytes_A,
(int*) nullptr,
(int*) nullptr,
xmachine_memory_A_MAX
);
gpuErrchk(hipMalloc(&d_temp_scan_storage_A, temp_scan_storage_bytes_A));
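    /* The two calls above follow the standard CUB two-phase pattern: calling DeviceScan::ExclusiveSum with a
     * null temporary-storage pointer only queries the required temp_scan_storage_bytes_A, which is then
     * allocated once and reused for every later scan over A agent lists. A later call would look roughly like
     *   hipcub::DeviceScan::ExclusiveSum(d_temp_scan_storage_A, temp_scan_storage_bytes_A,
     *                                    d_flags, d_positions, xmachine_memory_A_MAX);
     * where d_flags/d_positions are placeholder names - the actual arguments appear elsewhere in the
     * generated code. */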
/*Set global condition counts*/
/* RNG rand48 */
PROFILE_PUSH_RANGE("Initialse RNG_rand48");
int h_rand48_SoA_size = sizeof(RNG_rand48);
h_rand48 = (RNG_rand48*)malloc(h_rand48_SoA_size);
//allocate on GPU
gpuErrchk( hipMalloc( (void**) &d_rand48, h_rand48_SoA_size));
// calculate strided iteration constants
static const unsigned long long a = 0x5DEECE66DLL, c = 0xB;
int seed = 123;
unsigned long long A, C;
A = 1LL; C = 0LL;
for (unsigned int i = 0; i < buffer_size_MAX; ++i) {
C += A*c;
A *= a;
}
h_rand48->A.x = A & 0xFFFFFFLL;
h_rand48->A.y = (A >> 24) & 0xFFFFFFLL;
h_rand48->C.x = C & 0xFFFFFFLL;
h_rand48->C.y = (C >> 24) & 0xFFFFFFLL;
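    /* The loop above folds buffer_size_MAX steps of the LCG x -> a*x + c into one strided step:
     * A accumulates a^buffer_size_MAX and C accumulates c*(1 + a + a^2 + ... + a^(buffer_size_MAX-1)),
     * stored modulo 2^48 via the 24-bit x/y halves, so each thread can advance its own seed by the full
     * buffer stride with a single multiply-add. */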
// prepare first nThreads random numbers from seed
unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E;
for (unsigned int i = 0; i < buffer_size_MAX; ++i) {
x = a*x + c;
h_rand48->seeds[i].x = x & 0xFFFFFFLL;
h_rand48->seeds[i].y = (x >> 24) & 0xFFFFFFLL;
}
//copy to device
gpuErrchk( hipMemcpy( d_rand48, h_rand48, h_rand48_SoA_size, hipMemcpyHostToDevice));
PROFILE_POP_RANGE();
/* Call all init functions */
/* Prepare cuda event timers for instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
hipEventCreate(&instrument_iteration_start);
hipEventCreate(&instrument_iteration_stop);
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
hipEventCreate(&instrument_start);
hipEventCreate(&instrument_stop);
#endif
/* Init CUDA Streams for function layers */
gpuErrchk(hipStreamCreate(&stream1));
#if defined(OUTPUT_POPULATION_PER_ITERATION) && OUTPUT_POPULATION_PER_ITERATION
// Print the agent population size of all agents in all states
printf("Init agent_A_moving_count: %u\n",get_agent_A_moving_count());
printf("Init agent_A_change_direction_count: %u\n",get_agent_A_change_direction_count());
printf("Init agent_A_get_going_again_count: %u\n",get_agent_A_get_going_again_count());
#endif
}
void sort_As_moving(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_moving_count);
gridSize = (h_xmachine_memory_A_moving_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( generate_key_value_pairs), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_moving);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_moving_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_moving_count);
gridSize = (h_xmachine_memory_A_moving_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( reorder_A_agents), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_values, d_As_moving, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_moving;
d_As_moving = d_As_swap;
d_As_swap = d_As_temp;
}
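/* Illustrative usage sketch (hedged - the key-generation kernel below is hypothetical, not part of the
 * generated model; a real caller would typically invoke the sort from a step or init function, and a
 * production kernel would also guard index against the agent count):
 *
 *   __global__ void hypothetical_gen_A_keys(unsigned int* keys, unsigned int* values,
 *                                           xmachine_memory_A_list* agents){
 *       unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
 *       keys[index]   = (unsigned int)agents->id[index]; // sort moving A agents by id
 *       values[index] = index;                           // original position, consumed by reorder
 *   }
 *
 *   // sort_As_moving(&hypothetical_gen_A_keys);
 */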
void sort_As_change_direction(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_change_direction_count);
gridSize = (h_xmachine_memory_A_change_direction_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( generate_key_value_pairs), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_change_direction);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_change_direction_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_change_direction_count);
gridSize = (h_xmachine_memory_A_change_direction_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( reorder_A_agents), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_values, d_As_change_direction, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_change_direction;
d_As_change_direction = d_As_swap;
d_As_swap = d_As_temp;
}
void sort_As_get_going_again(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_get_going_again_count);
gridSize = (h_xmachine_memory_A_get_going_again_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( generate_key_value_pairs), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_get_going_again);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_get_going_again_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_get_going_again_count);
gridSize = (h_xmachine_memory_A_get_going_again_count + blockSize - 1) / blockSize; // Round up according to array size
hipLaunchKernelGGL(( reorder_A_agents), dim3(gridSize), dim3(blockSize), 0, 0, d_xmachine_memory_A_values, d_As_get_going_again, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_get_going_again;
d_As_get_going_again = d_As_swap;
d_As_swap = d_As_temp;
}
void cleanup(){
PROFILE_SCOPED_RANGE("cleanup");
/* Call all exit functions */
/* Agent data free*/
/* A Agent variables */
gpuErrchk(hipFree(d_As));
gpuErrchk(hipFree(d_As_swap));
gpuErrchk(hipFree(d_As_new));
free( h_As_moving);
gpuErrchk(hipFree(d_As_moving));
free( h_As_change_direction);
gpuErrchk(hipFree(d_As_change_direction));
free( h_As_get_going_again);
gpuErrchk(hipFree(d_As_get_going_again));
/* Message data free */
/* location Message variables */
free( h_locations);
gpuErrchk(hipFree(d_locations));
gpuErrchk(hipFree(d_locations_swap));
/* Free temporary CUB memory if required. */
if(d_temp_scan_storage_A != nullptr){
gpuErrchk(hipFree(d_temp_scan_storage_A));
d_temp_scan_storage_A = nullptr;
temp_scan_storage_bytes_A = 0;
}
/* Graph data free */
/* CUDA Streams for function layers */
gpuErrchk(hipStreamDestroy(stream1));
/* CUDA Event Timers for Instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
hipEventDestroy(instrument_iteration_start);
hipEventDestroy(instrument_iteration_stop);
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
hipEventDestroy(instrument_start);
hipEventDestroy(instrument_stop);
#endif
}
void singleIteration(){
PROFILE_SCOPED_RANGE("singleIteration");
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
hipEventRecord(instrument_iteration_start);
#endif
// Increment the iteration number.
g_iterationNumber++;
/* set all non partitioned, spatial partitioned and On-Graph Partitioned message counts to 0*/
h_message_location_count = 0;
//upload to device constant
gpuErrchk(hipMemcpyToSymbol( d_message_location_count, &h_message_location_count, sizeof(int)));
/* Call agent functions in order iterating through the layer functions */
/* Layer 1*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_move");
A_move(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_stop);
hipEventSynchronize(instrument_stop);
hipEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_move = %f (ms)\n", instrument_milliseconds);
#endif
hipDeviceSynchronize();
/* Layer 2*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_reverse_direction");
A_reverse_direction(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_stop);
hipEventSynchronize(instrument_stop);
hipEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_reverse_direction = %f (ms)\n", instrument_milliseconds);
#endif
hipDeviceSynchronize();
/* Layer 3*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_resume_movement");
A_resume_movement(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
hipEventRecord(instrument_stop);
hipEventSynchronize(instrument_stop);
hipEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_resume_movement = %f (ms)\n", instrument_milliseconds);
#endif
hipDeviceSynchronize();
/* Call all step functions */
#if defined(OUTPUT_POPULATION_PER_ITERATION) && OUTPUT_POPULATION_PER_ITERATION
// Print the agent population size of all agents in all states
printf("agent_A_moving_count: %u\n",get_agent_A_moving_count());
printf("agent_A_change_direction_count: %u\n",get_agent_A_change_direction_count());
printf("agent_A_get_going_again_count: %u\n",get_agent_A_get_going_again_count());
#endif
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
hipEventRecord(instrument_iteration_stop);
hipEventSynchronize(instrument_iteration_stop);
hipEventElapsedTime(&instrument_iteration_milliseconds, instrument_iteration_start, instrument_iteration_stop);
printf("Instrumentation: Iteration Time = %f (ms)\n", instrument_iteration_milliseconds);
#endif
}
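/* Illustrative driver sketch (hedged - the real entry point is generated elsewhere, e.g. in a main.cu,
 * and its argument handling and iteration count will differ):
 *
 *   initialise("iterations/0.xml");              // hypothetical initial-state file
 *   for (unsigned int i = 0; i < 100 && !get_exit_early(); i++){
 *       singleIteration();
 *   }
 *   cleanup();
 */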
/* finish whole simulation after this step */
void set_exit_early() {
g_exit_early = true;
}
bool get_exit_early() {
return g_exit_early;
}
/* Environment functions */
//host constant declaration
/* Agent data access functions*/
int get_agent_A_MAX_count(){
return xmachine_memory_A_MAX;
}
int get_agent_A_moving_count(){
//continuous agent
return h_xmachine_memory_A_moving_count;
}
xmachine_memory_A_list* get_device_A_moving_agents(){
return d_As_moving;
}
xmachine_memory_A_list* get_host_A_moving_agents(){
return h_As_moving;
}
int get_agent_A_change_direction_count(){
//continuous agent
return h_xmachine_memory_A_change_direction_count;
}
xmachine_memory_A_list* get_device_A_change_direction_agents(){
return d_As_change_direction;
}
xmachine_memory_A_list* get_host_A_change_direction_agents(){
return h_As_change_direction;
}
int get_agent_A_get_going_again_count(){
//continuous agent
return h_xmachine_memory_A_get_going_again_count;
}
xmachine_memory_A_list* get_device_A_get_going_again_agents(){
return d_As_get_going_again;
}
xmachine_memory_A_list* get_host_A_get_going_again_agents(){
return h_As_get_going_again;
}
/* Host based access of agent variables*/
/** int get_A_moving_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_moving_variable_id(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_id_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->id,
d_As_moving->id,
count * sizeof(int),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
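/* Illustrative host-side read sketch (hedged - shown only to make the copy-on-first-access behaviour
 * concrete: the first accessor call per variable per iteration triggers the device-to-host memcpy,
 * subsequent calls in the same iteration read the cached host array):
 *
 *   for (unsigned int i = 0; i < (unsigned int)get_agent_A_moving_count(); i++){
 *       int aid  = get_A_moving_variable_id(i);
 *       float ax = get_A_moving_variable_x(i);
 *       // ... use aid / ax ...
 *   }
 */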
/** float get_A_moving_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_moving_variable_x(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_x_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->x,
d_As_moving->x,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_moving_variable_y(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_y_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->y,
d_As_moving->y,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_moving_variable_z(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_z_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->z,
d_As_moving->z,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_moving_variable_fx(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fx_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->fx,
d_As_moving->fx,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_moving_variable_fy(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fy_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->fy,
d_As_moving->fy,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_moving_variable_fz(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fz_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_moving->fz,
d_As_moving->fz,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** int get_A_change_direction_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_change_direction_variable_id(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_id_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->id,
d_As_change_direction->id,
count * sizeof(int),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_change_direction_variable_x(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_x_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->x,
d_As_change_direction->x,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_change_direction_variable_y(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_y_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->y,
d_As_change_direction->y,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_change_direction_variable_z(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_z_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->z,
d_As_change_direction->z,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_change_direction_variable_fx(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fx_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->fx,
d_As_change_direction->fx,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_change_direction_variable_fy(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fy_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->fy,
d_As_change_direction->fy,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_change_direction_variable_fz(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fz_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_change_direction->fz,
d_As_change_direction->fz,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** int get_A_get_going_again_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_get_going_again_variable_id(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_id_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->id,
d_As_get_going_again->id,
count * sizeof(int),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_get_going_again_variable_x(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_x_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->x,
d_As_get_going_again->x,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_get_going_again_variable_y(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_y_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->y,
d_As_get_going_again->y,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_get_going_again_variable_z(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_z_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->z,
d_As_get_going_again->z,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_get_going_again_variable_fx(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fx_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->fx,
d_As_get_going_again->fx,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_get_going_again_variable_fy(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fy_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->fy,
d_As_get_going_again->fy,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_get_going_again_variable_fz(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fz_data_iteration != currentIteration){
gpuErrchk(
hipMemcpy(
h_As_get_going_again->fz,
d_As_get_going_again->fz,
count * sizeof(float),
hipMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/* Host based agent creation functions */
// These are only available for continuous agents.
/* copy_single_xmachine_memory_A_hostToDevice
* Private function to copy a host agent struct into a device SoA agent list.
* @param d_dst destination agent state list
* @param h_agent agent struct
*/
void copy_single_xmachine_memory_A_hostToDevice(xmachine_memory_A_list * d_dst, xmachine_memory_A * h_agent){
gpuErrchk(hipMemcpy(d_dst->id, &h_agent->id, sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->x, &h_agent->x, sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->y, &h_agent->y, sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->z, &h_agent->z, sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fx, &h_agent->fx, sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fy, &h_agent->fy, sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fz, &h_agent->fz, sizeof(float), hipMemcpyHostToDevice));
}
/*
* Private function to copy some elements from a host based struct of arrays to a device based struct of arrays for a single agent state.
* Individual copies of `count` elements are performed for each agent variable or each component of agent array variables, to avoid wasted data transfer.
 * There will be a point at which a single hipMemcpy outperforms many smaller memcpys; however, host-based agent creation typically populates only a fraction of the maximum buffer size, so per-variable copies should be more efficient here.
* @optimisation - experimentally find the proportion at which transferring the whole SoA would be better and incorporate this. The same will apply to agent variable arrays.
*
* @param d_dst device destination SoA
 * @param h_src host source SoA
* @param count the number of agents to transfer data for
*/
void copy_partial_xmachine_memory_A_hostToDevice(xmachine_memory_A_list * d_dst, xmachine_memory_A_list * h_src, unsigned int count){
// Only copy elements if there is data to move.
if (count > 0){
gpuErrchk(hipMemcpy(d_dst->id, h_src->id, count * sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->x, h_src->x, count * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->y, h_src->y, count * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->z, h_src->z, count * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fx, h_src->fx, count * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fy, h_src->fy, count * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_dst->fz, h_src->fz, count * sizeof(float), hipMemcpyHostToDevice));
}
}
xmachine_memory_A* h_allocate_agent_A(){
xmachine_memory_A* agent = (xmachine_memory_A*)malloc(sizeof(xmachine_memory_A));
// Memset the whole agent structure
memset(agent, 0, sizeof(xmachine_memory_A));
return agent;
}
void h_free_agent_A(xmachine_memory_A** agent){
free((*agent));
(*agent) = NULL;
}
xmachine_memory_A** h_allocate_agent_A_array(unsigned int count){
xmachine_memory_A ** agents = (xmachine_memory_A**)malloc(count * sizeof(xmachine_memory_A*));
for (unsigned int i = 0; i < count; i++) {
agents[i] = h_allocate_agent_A();
}
return agents;
}
void h_free_agent_A_array(xmachine_memory_A*** agents, unsigned int count){
for (unsigned int i = 0; i < count; i++) {
h_free_agent_A(&((*agents)[i]));
}
free((*agents));
(*agents) = NULL;
}
void h_unpack_agents_A_AoS_to_SoA(xmachine_memory_A_list * dst, xmachine_memory_A** src, unsigned int count){
if(count > 0){
for(unsigned int i = 0; i < count; i++){
dst->id[i] = src[i]->id;
dst->x[i] = src[i]->x;
dst->y[i] = src[i]->y;
dst->z[i] = src[i]->z;
dst->fx[i] = src[i]->fx;
dst->fy[i] = src[i]->fy;
dst->fz[i] = src[i]->fz;
}
}
}
void h_add_agent_A_moving(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state moving will be exceeded by h_add_agent_A_moving\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( append_A_Agents) , dim3(gridSize), dim3(blockSize), 0, stream1 , d_As_moving, d_As_new, h_xmachine_memory_A_moving_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_moving_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
}
void h_add_agents_A_moving(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state moving will be exceeded by h_add_agents_A_moving\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_moving, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_moving, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( append_A_Agents) , dim3(gridSize), dim3(blockSize), 0, stream1 , d_As_moving, d_As_new, h_xmachine_memory_A_moving_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_moving_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
}
}
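/* Illustrative host agent-creation sketch (hedged - typically issued from an init or step function;
 * the count and field values below are arbitrary placeholders):
 *
 *   xmachine_memory_A** new_As = h_allocate_agent_A_array(8);
 *   for (unsigned int i = 0; i < 8; i++){
 *       new_As[i]->id = (int)i;
 *       new_As[i]->x  = 0.0f;   // remaining variables keep their memset defaults
 *   }
 *   h_add_agents_A_moving(new_As, 8);
 *   h_free_agent_A_array(&new_As, 8);
 */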
void h_add_agent_A_change_direction(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state change_direction will be exceeded by h_add_agent_A_change_direction\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( append_A_Agents) , dim3(gridSize), dim3(blockSize), 0, stream1 , d_As_change_direction, d_As_new, h_xmachine_memory_A_change_direction_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_change_direction_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
}
void h_add_agents_A_change_direction(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state change_direction will be exceeded by h_add_agents_A_change_direction\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_change_direction, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_change_direction, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( append_A_Agents) , dim3(gridSize), dim3(blockSize), 0, stream1 , d_As_change_direction, d_As_new, h_xmachine_memory_A_change_direction_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_change_direction_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
}
}
void h_add_agent_A_get_going_again(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state get_going_again will be exceeded by h_add_agent_A_get_going_again\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(append_A_Agents, dim3(gridSize), dim3(blockSize), 0, stream1, d_As_get_going_again, d_As_new, h_xmachine_memory_A_get_going_again_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_get_going_again_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_get_going_again_count, &h_xmachine_memory_A_get_going_again_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
}
void h_add_agents_A_get_going_again(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state get_going_again will be exceeded by h_add_agents_A_get_going_again\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_get_going_again, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_get_going_again, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
hipOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
        hipLaunchKernelGGL(append_A_Agents, dim3(gridSize), dim3(blockSize), 0, stream1, d_As_get_going_again, d_As_new, h_xmachine_memory_A_get_going_again_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_get_going_again_count += count;
gpuErrchk(hipMemcpyToSymbol(d_xmachine_memory_A_get_going_again_count, &h_xmachine_memory_A_get_going_again_count, sizeof(int)));
hipDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
}
}
/* Analytics Functions */
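/* The wrappers below run Thrust reductions directly over the device state lists in the default
   stream, covering only the first h_xmachine_memory_A_<state>_count elements. A caveat visible in
   the min/max implementations: on an empty state list, min_element/max_element return the start
   iterator, so the wrappers would read an uninitialised element - guard calls with the
   corresponding count getter. */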
int reduce_A_moving_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->id), thrust::device_pointer_cast(d_As_moving->id) + h_xmachine_memory_A_moving_count);
}
int count_A_moving_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_moving->id), thrust::device_pointer_cast(d_As_moving->id) + h_xmachine_memory_A_moving_count, count_value);
}
int min_A_moving_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_moving->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_moving_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_moving->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->x), thrust::device_pointer_cast(d_As_moving->x) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->y), thrust::device_pointer_cast(d_As_moving->y) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->z), thrust::device_pointer_cast(d_As_moving->z) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fx), thrust::device_pointer_cast(d_As_moving->fx) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fy), thrust::device_pointer_cast(d_As_moving->fy) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fz), thrust::device_pointer_cast(d_As_moving->fz) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int reduce_A_change_direction_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->id), thrust::device_pointer_cast(d_As_change_direction->id) + h_xmachine_memory_A_change_direction_count);
}
int count_A_change_direction_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_change_direction->id), thrust::device_pointer_cast(d_As_change_direction->id) + h_xmachine_memory_A_change_direction_count, count_value);
}
int min_A_change_direction_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_change_direction_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->x), thrust::device_pointer_cast(d_As_change_direction->x) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->y), thrust::device_pointer_cast(d_As_change_direction->y) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->z), thrust::device_pointer_cast(d_As_change_direction->z) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fx), thrust::device_pointer_cast(d_As_change_direction->fx) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fy), thrust::device_pointer_cast(d_As_change_direction->fy) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fz), thrust::device_pointer_cast(d_As_change_direction->fz) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int reduce_A_get_going_again_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->id), thrust::device_pointer_cast(d_As_get_going_again->id) + h_xmachine_memory_A_get_going_again_count);
}
int count_A_get_going_again_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_get_going_again->id), thrust::device_pointer_cast(d_As_get_going_again->id) + h_xmachine_memory_A_get_going_again_count, count_value);
}
int min_A_get_going_again_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_get_going_again_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->x), thrust::device_pointer_cast(d_As_get_going_again->x) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->y), thrust::device_pointer_cast(d_As_get_going_again->y) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->z), thrust::device_pointer_cast(d_As_get_going_again->z) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fx), thrust::device_pointer_cast(d_As_get_going_again->fx) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fy), thrust::device_pointer_cast(d_As_get_going_again->fy) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fz), thrust::device_pointer_cast(d_As_get_going_again->fz) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
/* Agent functions */
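/* The generated agent function wrappers below all follow the same pattern:
   1. return early if the source state list is empty;
   2. pointer-swap the source state list into the working list (d_As) and zero the source count;
   3. pick a launch configuration with hipOccupancyMaxPotentialBlockSizeVariableSMem;
   4. launch the GPUFLAME_<function> kernel in the supplied stream;
   5. move the working agents to the destination state, either by swapping pointers back (when
      source and destination states match) or by appending with append_A_Agents. */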
/* Shared memory size calculator for agent function */
int A_move_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_move
* Agent function prototype for move function of A agent
*/
void A_move(hipStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_moving_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_moving_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_moving_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_moving_count = 0;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_move, A_move_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_move_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (move)
//Reallocate : false
//Input :
//Output :
//Agent Output :
    hipLaunchKernelGGL(GPUFLAME_move, dim3(g), dim3(b), sm_size, stream, d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
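    // move() starts and ends in the "moving" state, so the working list is simply swapped back
    // into place below; compare reverse_direction/resume_movement, which append into a different
    // destination state list instead.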
    //check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_moving_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of move agents in state moving will be exceeded moving working agents to next state in function move\n");
exit(EXIT_FAILURE);
}
//pointer swap the updated data
As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//update new state agent size
h_xmachine_memory_A_moving_count += h_xmachine_memory_A_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
}
/* Shared memory size calculator for agent function */
int A_reverse_direction_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_reverse_direction
* Agent function prototype for reverse_direction function of A agent
*/
void A_reverse_direction(hipStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_moving_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_moving_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_moving_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_moving_count = 0;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_reverse_direction, A_reverse_direction_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_reverse_direction_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (reverse_direction)
//Reallocate : false
//Input :
//Output :
//Agent Output :
    hipLaunchKernelGGL(GPUFLAME_reverse_direction, dim3(g), dim3(b), sm_size, stream, d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
    //check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_change_direction_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of reverse_direction agents in state change_direction will be exceeded moving working agents to next state in function reverse_direction\n");
exit(EXIT_FAILURE);
}
//append agents to next state list
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, append_A_Agents, no_sm, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(append_A_Agents, dim3(gridSize), dim3(blockSize), 0, stream, d_As_change_direction, d_As, h_xmachine_memory_A_change_direction_count, h_xmachine_memory_A_count);
gpuErrchkLaunch();
//update new state agent size
h_xmachine_memory_A_change_direction_count += h_xmachine_memory_A_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
}
/* Shared memory size calculator for agent function */
int A_resume_movement_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_resume_movement
* Agent function prototype for resume_movement function of A agent
*/
void A_resume_movement(hipStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_change_direction_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_change_direction_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_change_direction_temp = d_As;
d_As = d_As_change_direction;
d_As_change_direction = As_change_direction_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_change_direction_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_change_direction_count = 0;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_resume_movement, A_resume_movement_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_resume_movement_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (resume_movement)
//Reallocate : false
//Input :
//Output :
//Agent Output :
    hipLaunchKernelGGL(GPUFLAME_resume_movement, dim3(g), dim3(b), sm_size, stream, d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
    //check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_moving_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of resume_movement agents in state moving will be exceeded moving working agents to next state in function resume_movement\n");
exit(EXIT_FAILURE);
}
//append agents to next state list
hipOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, append_A_Agents, no_sm, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(append_A_Agents, dim3(gridSize), dim3(blockSize), 0, stream, d_As_moving, d_As, h_xmachine_memory_A_moving_count, h_xmachine_memory_A_count);
gpuErrchkLaunch();
//update new state agent size
h_xmachine_memory_A_moving_count += h_xmachine_memory_A_count;
gpuErrchk( hipMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
}
extern void reset_A_moving_count()
{
h_xmachine_memory_A_moving_count = 0;
}
extern void reset_A_change_direction_count()
{
h_xmachine_memory_A_change_direction_count = 0;
}
extern void reset_A_get_going_again_count()
{
h_xmachine_memory_A_get_going_again_count = 0;
}
| 28b30f38e5b59f0367154b02a97ca5941c3718b4.cu |
/*
* FLAME GPU v 1.5.X for CUDA 9
* Copyright University of Sheffield.
* Original Author: Dr Paul Richmond (user contributions tracked on https://github.com/FLAMEGPU/FLAMEGPU)
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
//Disable internal thrust warnings about conversions
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning (disable : 4267)
#pragma warning (disable : 4244)
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// includes
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/system/cuda/execution_policy.h>
#include <cub/cub.cuh>
// include FLAME kernels
#include "FLAMEGPU_kernals.cu"
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
/* Error check function for safe CUDA API calling */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Error check function for post CUDA Kernel calling */
#define gpuErrchkLaunch() { gpuLaunchAssert(__FILE__, __LINE__); }
inline void gpuLaunchAssert(const char *file, int line, bool abort=true)
{
gpuAssert( cudaPeekAtLastError(), file, line );
#ifdef _DEBUG
gpuAssert( cudaDeviceSynchronize(), file, line );
#endif
}
/* SM padding and offset variables */
int SM_START;
int PADDING;
unsigned int g_iterationNumber;
/* Agent Memory */
/* A Agent variables these lists are used in the agent function where as the other lists are used only outside the agent functions*/
xmachine_memory_A_list* d_As; /**< Pointer to agent list (population) on the device*/
xmachine_memory_A_list* d_As_swap; /**< Pointer to agent list swap on the device (used when killing agents)*/
xmachine_memory_A_list* d_As_new; /**< Pointer to new agent list on the device (used to hold new agents before they are appended to the population)*/
int h_xmachine_memory_A_count; /**< Agent population size counter */
uint * d_xmachine_memory_A_keys; /**< Agent sort identifiers keys*/
uint * d_xmachine_memory_A_values; /**< Agent sort identifiers value */
/* A state variables */
xmachine_memory_A_list* h_As_moving; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_moving; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_moving_count; /**< Agent population size counter */
/* A state variables */
xmachine_memory_A_list* h_As_change_direction; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_change_direction; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_change_direction_count; /**< Agent population size counter */
/* A state variables */
xmachine_memory_A_list* h_As_get_going_again; /**< Pointer to agent list (population) on host*/
xmachine_memory_A_list* d_As_get_going_again; /**< Pointer to agent list (population) on the device*/
int h_xmachine_memory_A_get_going_again_count; /**< Agent population size counter */
/* Variables to track the state of host copies of state lists, for the purposes of host agent data access.
* @future - if the host data is current it may be possible to avoid duplicating memcpy in xml output.
*/
unsigned int h_As_moving_variable_id_data_iteration;
unsigned int h_As_moving_variable_x_data_iteration;
unsigned int h_As_moving_variable_y_data_iteration;
unsigned int h_As_moving_variable_z_data_iteration;
unsigned int h_As_moving_variable_fx_data_iteration;
unsigned int h_As_moving_variable_fy_data_iteration;
unsigned int h_As_moving_variable_fz_data_iteration;
unsigned int h_As_change_direction_variable_id_data_iteration;
unsigned int h_As_change_direction_variable_x_data_iteration;
unsigned int h_As_change_direction_variable_y_data_iteration;
unsigned int h_As_change_direction_variable_z_data_iteration;
unsigned int h_As_change_direction_variable_fx_data_iteration;
unsigned int h_As_change_direction_variable_fy_data_iteration;
unsigned int h_As_change_direction_variable_fz_data_iteration;
unsigned int h_As_get_going_again_variable_id_data_iteration;
unsigned int h_As_get_going_again_variable_x_data_iteration;
unsigned int h_As_get_going_again_variable_y_data_iteration;
unsigned int h_As_get_going_again_variable_z_data_iteration;
unsigned int h_As_get_going_again_variable_fx_data_iteration;
unsigned int h_As_get_going_again_variable_fy_data_iteration;
unsigned int h_As_get_going_again_variable_fz_data_iteration;
/* Message Memory */
/* location Message variables */
xmachine_message_location_list* h_locations; /**< Pointer to message list on host*/
xmachine_message_location_list* d_locations; /**< Pointer to message list on device*/
xmachine_message_location_list* d_locations_swap; /**< Pointer to message swap list on device (used for holding optional messages)*/
/* Non partitioned and spatial partitioned message variables */
int h_message_location_count; /**< message list counter*/
int h_message_location_output_type; /**< message output type (single or optional)*/
/* CUDA Streams for function layers */
cudaStream_t stream1;
/* Device memory and sizes for CUB values */
void * d_temp_scan_storage_A;
size_t temp_scan_storage_bytes_A;
/*Global condition counts*/
/* RNG rand48 */
RNG_rand48* h_rand48; /**< Pointer to RNG_rand48 seed list on host*/
RNG_rand48* d_rand48; /**< Pointer to RNG_rand48 seed list on device*/
/* Early simulation exit*/
bool g_exit_early;
/* Cuda Event Timers for Instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
cudaEvent_t instrument_iteration_start, instrument_iteration_stop;
float instrument_iteration_milliseconds = 0.0f;
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
cudaEvent_t instrument_start, instrument_stop;
float instrument_milliseconds = 0.0f;
#endif
/* CUDA Parallel Primitives variables */
int scan_last_sum; /**< Holds the scan (prefix-sum) value of the last message in the message list*/
int scan_last_included; /**< Indicates whether the last sum value is included in the total sum count*/
/* Agent function prototypes */
/** A_move
* Agent function prototype for move function of A agent
*/
void A_move(cudaStream_t &stream);
/** A_reverse_direction
* Agent function prototype for reverse_direction function of A agent
*/
void A_reverse_direction(cudaStream_t &stream);
/** A_resume_movement
* Agent function prototype for resume_movement function of A agent
*/
void A_resume_movement(cudaStream_t &stream);
void setPaddingAndOffset()
{
PROFILE_SCOPED_RANGE("setPaddingAndOffset");
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int x64_sys = 0;
// This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
printf("Error: There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
//check if double is used and supported
#ifdef _DOUBLE_SUPPORT_REQUIRED_
printf("Simulation requires full precision double values\n");
if ((deviceProp.major < 2)&&(deviceProp.minor < 3)){
printf("Error: Hardware does not support full precision double values!\n");
exit(EXIT_FAILURE);
}
#endif
//check 32 or 64bit
x64_sys = (sizeof(void*)==8);
if (x64_sys)
{
printf("64Bit System Detected\n");
}
else
{
printf("32Bit System Detected\n");
}
SM_START = 0;
PADDING = 0;
//copy padding and offset to GPU
gpuErrchk(cudaMemcpyToSymbol( d_SM_START, &SM_START, sizeof(int)));
gpuErrchk(cudaMemcpyToSymbol( d_PADDING, &PADDING, sizeof(int)));
}
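/* Square power-of-two helpers: is_sqr_pow2 reports whether x is an exact power of 4
   (a "square power of 2"), and lowest_sqr_pow2 rounds x down to the nearest such value. */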
int is_sqr_pow2(int x){
int r = (int)pow(4, ceil(log(x)/log(4)));
return (r == x);
}
int lowest_sqr_pow2(int x){
int l;
    //return early if x is already a square power of 2
if (is_sqr_pow2(x))
return x;
//lower bound
l = (int)pow(4, floor(log(x)/log(4)));
return l;
}
/* Unary function required for cudaOccupancyMaxPotentialBlockSizeVariableSMem to avoid warnings */
int no_sm(int b){
return 0;
}
/* Unary function to return shared memory size for reorder message kernels */
int reorder_messages_sm_size(int blockSize)
{
return sizeof(unsigned int)*(blockSize+1);
}
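/* Sized as (blockSize + 1) unsigned ints, i.e. one extra shared-memory slot per block; the
   consuming reorder kernels live in the generated FLAMEGPU_kernals.cu (not shown here), where the
   extra slot is presumably used to carry the bin boundary value across the block edge. */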
/** getIterationNumber
* Get the iteration number (host)
* @return a 1 indexed value for the iteration number, which is incremented at the start of each simulation step.
* I.e. it is 0 on up until the first call to singleIteration()
*/
extern unsigned int getIterationNumber(){
return g_iterationNumber;
}
void initialise(char * inputfile){
PROFILE_SCOPED_RANGE("initialise");
//set the padding and offset values depending on architecture and OS
setPaddingAndOffset();
// Initialise some global variables
g_iterationNumber = 0;
g_exit_early = false;
// Initialise variables for tracking which iterations' data is accessible on the host.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
printf("Allocating Host and Device memory\n");
PROFILE_PUSH_RANGE("allocate host");
/* Agent memory allocation (CPU) */
int xmachine_A_SoA_size = sizeof(xmachine_memory_A_list);
h_As_moving = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
h_As_change_direction = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
h_As_get_going_again = (xmachine_memory_A_list*)malloc(xmachine_A_SoA_size);
/* Message memory allocation (CPU) */
int message_location_SoA_size = sizeof(xmachine_message_location_list);
h_locations = (xmachine_message_location_list*)malloc(message_location_SoA_size);
    //Exit if agent or message buffer sizes are too small for function outputs
/* Graph memory allocation (CPU) */
PROFILE_POP_RANGE(); //"allocate host"
//read initial states
readInitialStates(inputfile, h_As_moving, &h_xmachine_memory_A_moving_count);
// Read graphs from disk
PROFILE_PUSH_RANGE("allocate device");
/* A Agent memory allocation (GPU) */
gpuErrchk( cudaMalloc( (void**) &d_As, xmachine_A_SoA_size));
gpuErrchk( cudaMalloc( (void**) &d_As_swap, xmachine_A_SoA_size));
gpuErrchk( cudaMalloc( (void**) &d_As_new, xmachine_A_SoA_size));
//continuous agent sort identifiers
gpuErrchk( cudaMalloc( (void**) &d_xmachine_memory_A_keys, xmachine_memory_A_MAX* sizeof(uint)));
gpuErrchk( cudaMalloc( (void**) &d_xmachine_memory_A_values, xmachine_memory_A_MAX* sizeof(uint)));
/* moving memory allocation (GPU) */
gpuErrchk( cudaMalloc( (void**) &d_As_moving, xmachine_A_SoA_size));
gpuErrchk( cudaMemcpy( d_As_moving, h_As_moving, xmachine_A_SoA_size, cudaMemcpyHostToDevice));
/* change_direction memory allocation (GPU) */
gpuErrchk( cudaMalloc( (void**) &d_As_change_direction, xmachine_A_SoA_size));
gpuErrchk( cudaMemcpy( d_As_change_direction, h_As_change_direction, xmachine_A_SoA_size, cudaMemcpyHostToDevice));
/* get_going_again memory allocation (GPU) */
gpuErrchk( cudaMalloc( (void**) &d_As_get_going_again, xmachine_A_SoA_size));
gpuErrchk( cudaMemcpy( d_As_get_going_again, h_As_get_going_again, xmachine_A_SoA_size, cudaMemcpyHostToDevice));
/* location Message memory allocation (GPU) */
gpuErrchk( cudaMalloc( (void**) &d_locations, message_location_SoA_size));
gpuErrchk( cudaMalloc( (void**) &d_locations_swap, message_location_SoA_size));
gpuErrchk( cudaMemcpy( d_locations, h_locations, message_location_SoA_size, cudaMemcpyHostToDevice));
/* Allocate device memory for graphs */
PROFILE_POP_RANGE(); // "allocate device"
/* Calculate and allocate CUB temporary memory for exclusive scans */
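/* CUB size-query pattern: DeviceScan::ExclusiveSum is first called with a null temp-storage
   pointer, which only reports the required temporary storage size; the buffer is then allocated
   once and reused for every agent-list scan/compaction during the simulation. */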
d_temp_scan_storage_A = nullptr;
temp_scan_storage_bytes_A = 0;
cub::DeviceScan::ExclusiveSum(
d_temp_scan_storage_A,
temp_scan_storage_bytes_A,
(int*) nullptr,
(int*) nullptr,
xmachine_memory_A_MAX
);
gpuErrchk(cudaMalloc(&d_temp_scan_storage_A, temp_scan_storage_bytes_A));
/*Set global condition counts*/
/* RNG rand48 */
    PROFILE_PUSH_RANGE("Initialise RNG_rand48");
int h_rand48_SoA_size = sizeof(RNG_rand48);
h_rand48 = (RNG_rand48*)malloc(h_rand48_SoA_size);
//allocate on GPU
gpuErrchk( cudaMalloc( (void**) &d_rand48, h_rand48_SoA_size));
// calculate strided iteration constants
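    // Skip-ahead constants for the rand48 linear congruential generator: after the loop below,
    // A == a^buffer_size_MAX (mod 2^48) and C == c*(1 + a + ... + a^(buffer_size_MAX-1)) (mod 2^48),
    // stored as two 24-bit limbs (.x low, .y high). Each device thread can then advance its seed
    // by buffer_size_MAX states with a single multiply-add, giving independent per-thread streams.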
static const unsigned long long a = 0x5DEECE66DLL, c = 0xB;
int seed = 123;
unsigned long long A, C;
A = 1LL; C = 0LL;
for (unsigned int i = 0; i < buffer_size_MAX; ++i) {
C += A*c;
A *= a;
}
h_rand48->A.x = A & 0xFFFFFFLL;
h_rand48->A.y = (A >> 24) & 0xFFFFFFLL;
h_rand48->C.x = C & 0xFFFFFFLL;
h_rand48->C.y = (C >> 24) & 0xFFFFFFLL;
// prepare first nThreads random numbers from seed
unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E;
for (unsigned int i = 0; i < buffer_size_MAX; ++i) {
x = a*x + c;
h_rand48->seeds[i].x = x & 0xFFFFFFLL;
h_rand48->seeds[i].y = (x >> 24) & 0xFFFFFFLL;
}
//copy to device
gpuErrchk( cudaMemcpy( d_rand48, h_rand48, h_rand48_SoA_size, cudaMemcpyHostToDevice));
PROFILE_POP_RANGE();
/* Call all init functions */
/* Prepare cuda event timers for instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
cudaEventCreate(&instrument_iteration_start);
cudaEventCreate(&instrument_iteration_stop);
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
cudaEventCreate(&instrument_start);
cudaEventCreate(&instrument_stop);
#endif
/* Init CUDA Streams for function layers */
gpuErrchk(cudaStreamCreate(&stream1));
#if defined(OUTPUT_POPULATION_PER_ITERATION) && OUTPUT_POPULATION_PER_ITERATION
// Print the agent population size of all agents in all states
printf("Init agent_A_moving_count: %u\n",get_agent_A_moving_count());
printf("Init agent_A_change_direction_count: %u\n",get_agent_A_change_direction_count());
printf("Init agent_A_get_going_again_count: %u\n",get_agent_A_get_going_again_count());
#endif
}
void sort_As_moving(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_moving_count);
gridSize = (h_xmachine_memory_A_moving_count + blockSize - 1) / blockSize; // Round up according to array size
generate_key_value_pairs<<<gridSize, blockSize>>>(d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_moving);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_moving_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_moving_count);
gridSize = (h_xmachine_memory_A_moving_count + blockSize - 1) / blockSize; // Round up according to array size
reorder_A_agents<<<gridSize, blockSize>>>(d_xmachine_memory_A_values, d_As_moving, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_moving;
d_As_moving = d_As_swap;
d_As_swap = d_As_temp;
}
void sort_As_change_direction(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_change_direction_count);
gridSize = (h_xmachine_memory_A_change_direction_count + blockSize - 1) / blockSize; // Round up according to array size
generate_key_value_pairs<<<gridSize, blockSize>>>(d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_change_direction);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_change_direction_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_change_direction_count);
gridSize = (h_xmachine_memory_A_change_direction_count + blockSize - 1) / blockSize; // Round up according to array size
reorder_A_agents<<<gridSize, blockSize>>>(d_xmachine_memory_A_values, d_As_change_direction, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_change_direction;
d_As_change_direction = d_As_swap;
d_As_swap = d_As_temp;
}
void sort_As_get_going_again(void (*generate_key_value_pairs)(unsigned int* keys, unsigned int* values, xmachine_memory_A_list* agents))
{
int blockSize;
int minGridSize;
int gridSize;
//generate sort keys
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, generate_key_value_pairs, no_sm, h_xmachine_memory_A_get_going_again_count);
gridSize = (h_xmachine_memory_A_get_going_again_count + blockSize - 1) / blockSize; // Round up according to array size
generate_key_value_pairs<<<gridSize, blockSize>>>(d_xmachine_memory_A_keys, d_xmachine_memory_A_values, d_As_get_going_again);
gpuErrchkLaunch();
//updated Thrust sort
thrust::sort_by_key( thrust::device_pointer_cast(d_xmachine_memory_A_keys), thrust::device_pointer_cast(d_xmachine_memory_A_keys) + h_xmachine_memory_A_get_going_again_count, thrust::device_pointer_cast(d_xmachine_memory_A_values));
gpuErrchkLaunch();
//reorder agents
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, reorder_A_agents, no_sm, h_xmachine_memory_A_get_going_again_count);
gridSize = (h_xmachine_memory_A_get_going_again_count + blockSize - 1) / blockSize; // Round up according to array size
reorder_A_agents<<<gridSize, blockSize>>>(d_xmachine_memory_A_values, d_As_get_going_again, d_As_swap);
gpuErrchkLaunch();
//swap
xmachine_memory_A_list* d_As_temp = d_As_get_going_again;
d_As_get_going_again = d_As_swap;
d_As_swap = d_As_temp;
}
void cleanup(){
PROFILE_SCOPED_RANGE("cleanup");
/* Call all exit functions */
/* Agent data free*/
/* A Agent variables */
gpuErrchk(cudaFree(d_As));
gpuErrchk(cudaFree(d_As_swap));
gpuErrchk(cudaFree(d_As_new));
free( h_As_moving);
gpuErrchk(cudaFree(d_As_moving));
free( h_As_change_direction);
gpuErrchk(cudaFree(d_As_change_direction));
free( h_As_get_going_again);
gpuErrchk(cudaFree(d_As_get_going_again));
/* Message data free */
/* location Message variables */
free( h_locations);
gpuErrchk(cudaFree(d_locations));
gpuErrchk(cudaFree(d_locations_swap));
/* Free temporary CUB memory if required. */
if(d_temp_scan_storage_A != nullptr){
gpuErrchk(cudaFree(d_temp_scan_storage_A));
d_temp_scan_storage_A = nullptr;
temp_scan_storage_bytes_A = 0;
}
/* Graph data free */
/* CUDA Streams for function layers */
gpuErrchk(cudaStreamDestroy(stream1));
/* CUDA Event Timers for Instrumentation */
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
cudaEventDestroy(instrument_iteration_start);
cudaEventDestroy(instrument_iteration_stop);
#endif
#if (defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS) || (defined(INSTRUMENT_INIT_FUNCTIONS) && INSTRUMENT_INIT_FUNCTIONS) || (defined(INSTRUMENT_STEP_FUNCTIONS) && INSTRUMENT_STEP_FUNCTIONS) || (defined(INSTRUMENT_EXIT_FUNCTIONS) && INSTRUMENT_EXIT_FUNCTIONS)
cudaEventDestroy(instrument_start);
cudaEventDestroy(instrument_stop);
#endif
}
void singleIteration(){
PROFILE_SCOPED_RANGE("singleIteration");
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
cudaEventRecord(instrument_iteration_start);
#endif
// Increment the iteration number.
g_iterationNumber++;
/* set all non partitioned, spatial partitioned and On-Graph Partitioned message counts to 0*/
h_message_location_count = 0;
//upload to device constant
gpuErrchk(cudaMemcpyToSymbol( d_message_location_count, &h_message_location_count, sizeof(int)));
/* Call agent functions in order iterating through the layer functions */
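    /* Each layer runs its agent functions in stream1 and is followed by a cudaDeviceSynchronize,
       so messages and state changes produced by one layer are complete before the next layer
       launches. */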
/* Layer 1*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_move");
A_move(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_stop);
cudaEventSynchronize(instrument_stop);
cudaEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_move = %f (ms)\n", instrument_milliseconds);
#endif
cudaDeviceSynchronize();
/* Layer 2*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_reverse_direction");
A_reverse_direction(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_stop);
cudaEventSynchronize(instrument_stop);
cudaEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_reverse_direction = %f (ms)\n", instrument_milliseconds);
#endif
cudaDeviceSynchronize();
/* Layer 3*/
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_start);
#endif
PROFILE_PUSH_RANGE("A_resume_movement");
A_resume_movement(stream1);
PROFILE_POP_RANGE();
#if defined(INSTRUMENT_AGENT_FUNCTIONS) && INSTRUMENT_AGENT_FUNCTIONS
cudaEventRecord(instrument_stop);
cudaEventSynchronize(instrument_stop);
cudaEventElapsedTime(&instrument_milliseconds, instrument_start, instrument_stop);
printf("Instrumentation: A_resume_movement = %f (ms)\n", instrument_milliseconds);
#endif
cudaDeviceSynchronize();
/* Call all step functions */
#if defined(OUTPUT_POPULATION_PER_ITERATION) && OUTPUT_POPULATION_PER_ITERATION
// Print the agent population size of all agents in all states
printf("agent_A_moving_count: %u\n",get_agent_A_moving_count());
printf("agent_A_change_direction_count: %u\n",get_agent_A_change_direction_count());
printf("agent_A_get_going_again_count: %u\n",get_agent_A_get_going_again_count());
#endif
#if defined(INSTRUMENT_ITERATIONS) && INSTRUMENT_ITERATIONS
cudaEventRecord(instrument_iteration_stop);
cudaEventSynchronize(instrument_iteration_stop);
cudaEventElapsedTime(&instrument_iteration_milliseconds, instrument_iteration_start, instrument_iteration_stop);
printf("Instrumentation: Iteration Time = %f (ms)\n", instrument_iteration_milliseconds);
#endif
}
/* finish whole simulation after this step */
void set_exit_early() {
g_exit_early = true;
}
bool get_exit_early() {
return g_exit_early;
}
/* Environment functions */
//host constant declaration
/* Agent data access functions*/
int get_agent_A_MAX_count(){
return xmachine_memory_A_MAX;
}
int get_agent_A_moving_count(){
//continuous agent
return h_xmachine_memory_A_moving_count;
}
xmachine_memory_A_list* get_device_A_moving_agents(){
return d_As_moving;
}
xmachine_memory_A_list* get_host_A_moving_agents(){
return h_As_moving;
}
int get_agent_A_change_direction_count(){
//continuous agent
return h_xmachine_memory_A_change_direction_count;
}
xmachine_memory_A_list* get_device_A_change_direction_agents(){
return d_As_change_direction;
}
xmachine_memory_A_list* get_host_A_change_direction_agents(){
return h_As_change_direction;
}
int get_agent_A_get_going_again_count(){
//continuous agent
return h_xmachine_memory_A_get_going_again_count;
}
xmachine_memory_A_list* get_device_A_get_going_again_agents(){
return d_As_get_going_again;
}
xmachine_memory_A_list* get_host_A_get_going_again_agents(){
return h_As_get_going_again;
}
/* Host based access of agent variables */
/** int get_A_moving_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_moving_variable_id(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_id_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->id,
d_As_moving->id,
count * sizeof(int),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
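// Example usage (illustrative sketch, kept as a comment): summing the id variable of
// all moving agents from host code via the generated per-variable getters. The helper
// name is an assumption for illustration. Note that the first call in an iteration
// triggers a device-to-host copy of the whole id column; subsequent calls in the same
// iteration reuse the cached host data, so looping over indices is comparatively cheap.
/*
int host_sum_of_moving_ids(){
    int sum = 0;
    unsigned int count = get_agent_A_moving_count();
    for (unsigned int i = 0; i < count; i++){
        sum += get_A_moving_variable_id(i);
    }
    return sum;
}
*/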
/** float get_A_moving_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_moving_variable_x(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_x_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->x,
d_As_moving->x,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_moving_variable_y(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_y_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->y,
d_As_moving->y,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_moving_variable_z(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_z_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->z,
d_As_moving->z,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_moving_variable_fx(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fx_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->fx,
d_As_moving->fx,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_moving_variable_fy(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fy_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->fy,
d_As_moving->fy,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_moving_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the moving state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_moving_variable_fz(unsigned int index){
unsigned int count = get_agent_A_moving_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_moving_variable_fz_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_moving->fz,
d_As_moving->fz,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_moving_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_moving->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_moving. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** int get_A_change_direction_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_change_direction_variable_id(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_id_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->id,
d_As_change_direction->id,
count * sizeof(int),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_change_direction_variable_x(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_x_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->x,
d_As_change_direction->x,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_change_direction_variable_y(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_y_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->y,
d_As_change_direction->y,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_change_direction_variable_z(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_z_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->z,
d_As_change_direction->z,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_change_direction_variable_fx(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fx_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->fx,
d_As_change_direction->fx,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_change_direction_variable_fy(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fy_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->fy,
d_As_change_direction->fy,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_change_direction_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the change_direction state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_change_direction_variable_fz(unsigned int index){
unsigned int count = get_agent_A_change_direction_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_change_direction_variable_fz_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_change_direction->fz,
d_As_change_direction->fz,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_change_direction_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_change_direction->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_change_direction. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** int get_A_get_going_again_variable_id(unsigned int index)
* Gets the value of the id variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable id
*/
__host__ int get_A_get_going_again_variable_id(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_id_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->id,
d_As_get_going_again->id,
count * sizeof(int),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_id_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->id[index];
} else {
fprintf(stderr, "Warning: Attempting to access id for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_x(unsigned int index)
* Gets the value of the x variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable x
*/
__host__ float get_A_get_going_again_variable_x(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_x_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->x,
d_As_get_going_again->x,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_x_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->x[index];
} else {
fprintf(stderr, "Warning: Attempting to access x for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_y(unsigned int index)
* Gets the value of the y variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable y
*/
__host__ float get_A_get_going_again_variable_y(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_y_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->y,
d_As_get_going_again->y,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_y_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->y[index];
} else {
fprintf(stderr, "Warning: Attempting to access y for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_z(unsigned int index)
* Gets the value of the z variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable z
*/
__host__ float get_A_get_going_again_variable_z(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_z_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->z,
d_As_get_going_again->z,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_z_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->z[index];
} else {
fprintf(stderr, "Warning: Attempting to access z for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fx(unsigned int index)
* Gets the value of the fx variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fx
*/
__host__ float get_A_get_going_again_variable_fx(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fx_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->fx,
d_As_get_going_again->fx,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fx_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fx[index];
} else {
fprintf(stderr, "Warning: Attempting to access fx for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fy(unsigned int index)
* Gets the value of the fy variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fy
*/
__host__ float get_A_get_going_again_variable_fy(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fy_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->fy,
d_As_get_going_again->fy,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fy_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fy[index];
} else {
fprintf(stderr, "Warning: Attempting to access fy for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/** float get_A_get_going_again_variable_fz(unsigned int index)
* Gets the value of the fz variable of an A agent in the get_going_again state on the host.
* If the data is not currently on the host, a memcpy of the data of all agents in that state list will be issued, via a global.
* This has a potentially significant performance impact if used improperly.
* @param index the index of the agent within the list.
* @return value of agent variable fz
*/
__host__ float get_A_get_going_again_variable_fz(unsigned int index){
unsigned int count = get_agent_A_get_going_again_count();
unsigned int currentIteration = getIterationNumber();
// If the index is within bounds - no need to check >= 0 due to unsigned.
if(count > 0 && index < count ){
// If necessary, copy agent data from the device to the host in the default stream
if(h_As_get_going_again_variable_fz_data_iteration != currentIteration){
gpuErrchk(
cudaMemcpy(
h_As_get_going_again->fz,
d_As_get_going_again->fz,
count * sizeof(float),
cudaMemcpyDeviceToHost
)
);
// Update some global value indicating what data is currently present in that host array.
h_As_get_going_again_variable_fz_data_iteration = currentIteration;
}
// Return the value of the index-th element of the relevant host array.
return h_As_get_going_again->fz[index];
} else {
fprintf(stderr, "Warning: Attempting to access fz for the %u th member of A_get_going_again. count is %u at iteration %u\n", index, count, currentIteration);
// Otherwise we return a default value
return 0;
}
}
/* Host based agent creation functions */
// These are only available for continuous agents.
/* copy_single_xmachine_memory_A_hostToDevice
* Private function to copy a host agent struct into a device SoA agent list.
* @param d_dst destination agent state list
* @param h_agent agent struct
*/
void copy_single_xmachine_memory_A_hostToDevice(xmachine_memory_A_list * d_dst, xmachine_memory_A * h_agent){
gpuErrchk(cudaMemcpy(d_dst->id, &h_agent->id, sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->x, &h_agent->x, sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->y, &h_agent->y, sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->z, &h_agent->z, sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fx, &h_agent->fx, sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fy, &h_agent->fy, sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fz, &h_agent->fz, sizeof(float), cudaMemcpyHostToDevice));
}
/*
* Private function to copy some elements from a host based struct of arrays to a device based struct of arrays for a single agent state.
* Individual copies of `count` elements are performed for each agent variable or each component of agent array variables, to avoid wasted data transfer.
 * There will be a point at which a single cudaMemcpy outperforms many smaller copies; however, host-based agent creation should typically only populate a fraction of the maximum buffer size, so this approach should be more efficient.
 * @optimisation - experimentally find the proportion at which transferring the whole SoA would be better and incorporate this. The same will apply to agent variable arrays.
 *
 * @param d_dst device destination SoA
 * @param h_src host source SoA
* @param count the number of agents to transfer data for
*/
void copy_partial_xmachine_memory_A_hostToDevice(xmachine_memory_A_list * d_dst, xmachine_memory_A_list * h_src, unsigned int count){
// Only copy elements if there is data to move.
if (count > 0){
gpuErrchk(cudaMemcpy(d_dst->id, h_src->id, count * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->x, h_src->x, count * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->y, h_src->y, count * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->z, h_src->z, count * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fx, h_src->fx, count * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fy, h_src->fy, count * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_dst->fz, h_src->fz, count * sizeof(float), cudaMemcpyHostToDevice));
}
}
xmachine_memory_A* h_allocate_agent_A(){
xmachine_memory_A* agent = (xmachine_memory_A*)malloc(sizeof(xmachine_memory_A));
    // Memset the whole agent structure
memset(agent, 0, sizeof(xmachine_memory_A));
return agent;
}
void h_free_agent_A(xmachine_memory_A** agent){
free((*agent));
(*agent) = NULL;
}
xmachine_memory_A** h_allocate_agent_A_array(unsigned int count){
xmachine_memory_A ** agents = (xmachine_memory_A**)malloc(count * sizeof(xmachine_memory_A*));
for (unsigned int i = 0; i < count; i++) {
agents[i] = h_allocate_agent_A();
}
return agents;
}
void h_free_agent_A_array(xmachine_memory_A*** agents, unsigned int count){
for (unsigned int i = 0; i < count; i++) {
h_free_agent_A(&((*agents)[i]));
}
free((*agents));
(*agents) = NULL;
}
void h_unpack_agents_A_AoS_to_SoA(xmachine_memory_A_list * dst, xmachine_memory_A** src, unsigned int count){
if(count > 0){
for(unsigned int i = 0; i < count; i++){
dst->id[i] = src[i]->id;
dst->x[i] = src[i]->x;
dst->y[i] = src[i]->y;
dst->z[i] = src[i]->z;
dst->fx[i] = src[i]->fx;
dst->fy[i] = src[i]->fy;
dst->fz[i] = src[i]->fz;
}
}
}
void h_add_agent_A_moving(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state moving will be exceeded by h_add_agent_A_moving\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_moving, d_As_new, h_xmachine_memory_A_moving_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_moving_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
}
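// Example usage (illustrative sketch, kept as a comment): creating a single A agent on
// the host, populating its members and appending it to the moving state list. The
// function name and field values are arbitrary assumptions for illustration;
// h_free_agent_A also NULLs the caller's pointer once the device copy has been made.
/*
void host_create_one_A(){
    xmachine_memory_A* a = h_allocate_agent_A();   // zero-initialised struct
    a->id = 42;
    a->x = 1.0f; a->y = 2.0f; a->z = 0.0f;
    h_add_agent_A_moving(a);                       // copies the data to the device state list
    h_free_agent_A(&a);                            // host copy is no longer needed
}
*/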
void h_add_agents_A_moving(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state moving will be exceeded by h_add_agents_A_moving\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_moving, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_moving, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_moving, d_As_new, h_xmachine_memory_A_moving_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_moving_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_moving_variable_id_data_iteration = 0;
h_As_moving_variable_x_data_iteration = 0;
h_As_moving_variable_y_data_iteration = 0;
h_As_moving_variable_z_data_iteration = 0;
h_As_moving_variable_fx_data_iteration = 0;
h_As_moving_variable_fy_data_iteration = 0;
h_As_moving_variable_fz_data_iteration = 0;
}
}
void h_add_agent_A_change_direction(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state change_direction will be exceeded by h_add_agent_A_change_direction\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_change_direction, d_As_new, h_xmachine_memory_A_change_direction_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_change_direction_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
}
void h_add_agents_A_change_direction(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state change_direction will be exceeded by h_add_agents_A_change_direction\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_change_direction, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_change_direction, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_change_direction, d_As_new, h_xmachine_memory_A_change_direction_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_change_direction_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_change_direction_variable_id_data_iteration = 0;
h_As_change_direction_variable_x_data_iteration = 0;
h_As_change_direction_variable_y_data_iteration = 0;
h_As_change_direction_variable_z_data_iteration = 0;
h_As_change_direction_variable_fx_data_iteration = 0;
h_As_change_direction_variable_fy_data_iteration = 0;
h_As_change_direction_variable_fz_data_iteration = 0;
}
}
void h_add_agent_A_get_going_again(xmachine_memory_A* agent){
if (h_xmachine_memory_A_count + 1 > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state get_going_again will be exceeded by h_add_agent_A_get_going_again\n");
exit(EXIT_FAILURE);
}
int blockSize;
int minGridSize;
int gridSize;
unsigned int count = 1;
// Copy data from host struct to device SoA for target state
copy_single_xmachine_memory_A_hostToDevice(d_As_new, agent);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_get_going_again, d_As_new, h_xmachine_memory_A_get_going_again_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_get_going_again_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_get_going_again_count, &h_xmachine_memory_A_get_going_again_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
}
void h_add_agents_A_get_going_again(xmachine_memory_A** agents, unsigned int count){
if(count > 0){
int blockSize;
int minGridSize;
int gridSize;
if (h_xmachine_memory_A_count + count > xmachine_memory_A_MAX){
printf("Error: Buffer size of A agents in state get_going_again will be exceeded by h_add_agents_A_get_going_again\n");
exit(EXIT_FAILURE);
}
// Unpack data from AoS into the pre-existing SoA
h_unpack_agents_A_AoS_to_SoA(h_As_get_going_again, agents, count);
// Copy data from the host SoA to the device SoA for the target state
copy_partial_xmachine_memory_A_hostToDevice(d_As_new, h_As_get_going_again, count);
// Use append kernel (@optimisation - This can be replaced with a pointer swap if the target state list is empty)
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, append_A_Agents, no_sm, count);
gridSize = (count + blockSize - 1) / blockSize;
append_A_Agents <<<gridSize, blockSize, 0, stream1 >>>(d_As_get_going_again, d_As_new, h_xmachine_memory_A_get_going_again_count, count);
gpuErrchkLaunch();
// Update the number of agents in this state.
h_xmachine_memory_A_get_going_again_count += count;
gpuErrchk(cudaMemcpyToSymbol(d_xmachine_memory_A_get_going_again_count, &h_xmachine_memory_A_get_going_again_count, sizeof(int)));
cudaDeviceSynchronize();
// Reset host variable status flags for the relevant agent state list as the device state list has been modified.
h_As_get_going_again_variable_id_data_iteration = 0;
h_As_get_going_again_variable_x_data_iteration = 0;
h_As_get_going_again_variable_y_data_iteration = 0;
h_As_get_going_again_variable_z_data_iteration = 0;
h_As_get_going_again_variable_fx_data_iteration = 0;
h_As_get_going_again_variable_fy_data_iteration = 0;
h_As_get_going_again_variable_fz_data_iteration = 0;
}
}
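// Example usage (illustrative sketch, kept as a comment): creating a batch of A agents
// on the host and appending them to the moving state list in one call. The function
// name, batch size and field values are assumptions for illustration; the bulk path
// unpacks the AoS batch into the host SoA and issues per-variable copies to the device.
/*
void host_seed_moving_agents(unsigned int n){
    xmachine_memory_A** batch = h_allocate_agent_A_array(n);
    for (unsigned int i = 0; i < n; i++){
        batch[i]->id = (int)i;
        batch[i]->x = 0.1f * i;
        batch[i]->y = 0.0f;
        batch[i]->z = 0.0f;
    }
    h_add_agents_A_moving(batch, n);      // unpacks to SoA and copies to the device
    h_free_agent_A_array(&batch, n);      // host copies are no longer needed
}
*/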
/* Analytics Functions */
int reduce_A_moving_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->id), thrust::device_pointer_cast(d_As_moving->id) + h_xmachine_memory_A_moving_count);
}
int count_A_moving_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_moving->id), thrust::device_pointer_cast(d_As_moving->id) + h_xmachine_memory_A_moving_count, count_value);
}
int min_A_moving_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_moving->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_moving_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_moving->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->x), thrust::device_pointer_cast(d_As_moving->x) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->y), thrust::device_pointer_cast(d_As_moving->y) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->z), thrust::device_pointer_cast(d_As_moving->z) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fx), thrust::device_pointer_cast(d_As_moving->fx) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fy), thrust::device_pointer_cast(d_As_moving->fy) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_moving_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_moving->fz), thrust::device_pointer_cast(d_As_moving->fz) + h_xmachine_memory_A_moving_count);
}
float min_A_moving_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_moving_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_moving->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_moving_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
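// Example usage (illustrative sketch, kept as a comment): combining the generated
// reductions with the agent count to report the mean position of moving agents, e.g.
// from a host step or exit function. The function name is an assumption for
// illustration; guarding against a zero count avoids division by zero.
/*
void host_report_mean_moving_position(){
    unsigned int count = get_agent_A_moving_count();
    if (count > 0){
        float mean_x = reduce_A_moving_x_variable() / (float)count;
        float mean_y = reduce_A_moving_y_variable() / (float)count;
        printf("Mean moving position: (%f, %f)\n", mean_x, mean_y);
    }
}
*/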
int reduce_A_change_direction_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->id), thrust::device_pointer_cast(d_As_change_direction->id) + h_xmachine_memory_A_change_direction_count);
}
int count_A_change_direction_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_change_direction->id), thrust::device_pointer_cast(d_As_change_direction->id) + h_xmachine_memory_A_change_direction_count, count_value);
}
int min_A_change_direction_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_change_direction_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->x), thrust::device_pointer_cast(d_As_change_direction->x) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->y), thrust::device_pointer_cast(d_As_change_direction->y) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->z), thrust::device_pointer_cast(d_As_change_direction->z) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fx), thrust::device_pointer_cast(d_As_change_direction->fx) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fy), thrust::device_pointer_cast(d_As_change_direction->fy) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_change_direction_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_change_direction->fz), thrust::device_pointer_cast(d_As_change_direction->fz) + h_xmachine_memory_A_change_direction_count);
}
float min_A_change_direction_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_change_direction_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_change_direction->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_change_direction_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int reduce_A_get_going_again_id_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->id), thrust::device_pointer_cast(d_As_get_going_again->id) + h_xmachine_memory_A_get_going_again_count);
}
int count_A_get_going_again_id_variable(int count_value){
//count in default stream
return (int)thrust::count(thrust::device_pointer_cast(d_As_get_going_again->id), thrust::device_pointer_cast(d_As_get_going_again->id) + h_xmachine_memory_A_get_going_again_count, count_value);
}
int min_A_get_going_again_id_variable(){
//min in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->id);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
int max_A_get_going_again_id_variable(){
//max in default stream
thrust::device_ptr<int> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->id);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_x_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->x), thrust::device_pointer_cast(d_As_get_going_again->x) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_x_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->x);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_x_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->x);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_y_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->y), thrust::device_pointer_cast(d_As_get_going_again->y) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_y_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->y);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_y_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->y);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_z_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->z), thrust::device_pointer_cast(d_As_get_going_again->z) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_z_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->z);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_z_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->z);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fx_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fx), thrust::device_pointer_cast(d_As_get_going_again->fx) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fx_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fx);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fx_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fx);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fy_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fy), thrust::device_pointer_cast(d_As_get_going_again->fy) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fy_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fy);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fy_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fy);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float reduce_A_get_going_again_fz_variable(){
//reduce in default stream
return thrust::reduce(thrust::device_pointer_cast(d_As_get_going_again->fz), thrust::device_pointer_cast(d_As_get_going_again->fz) + h_xmachine_memory_A_get_going_again_count);
}
float min_A_get_going_again_fz_variable(){
//min in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fz);
size_t result_offset = thrust::min_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
float max_A_get_going_again_fz_variable(){
//max in default stream
thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(d_As_get_going_again->fz);
size_t result_offset = thrust::max_element(thrust_ptr, thrust_ptr + h_xmachine_memory_A_get_going_again_count) - thrust_ptr;
return *(thrust_ptr + result_offset);
}
/* Agent functions */
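/* Each generated agent function below follows the same pattern: swap the current
   state list into the working list (d_As) and zero the source state count, pick a
   launch configuration with cudaOccupancyMaxPotentialBlockSizeVariableSMem, run the
   GPUFLAME_<name> kernel, then move the working agents into the destination state
   list: a plain pointer swap when the state does not change, or append_A_Agents
   when agents transition to a different state. */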
/* Shared memory size calculator for agent function */
int A_move_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_move
* Agent function prototype for move function of A agent
*/
void A_move(cudaStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_moving_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_moving_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_moving_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_moving_count = 0;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_move, A_move_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_move_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (move)
//Reallocate : false
//Input :
//Output :
//Agent Output :
GPUFLAME_move<<<g, b, sm_size, stream>>>(d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
	//check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_moving_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of move agents in state moving will be exceeded moving working agents to next state in function move\n");
exit(EXIT_FAILURE);
}
//pointer swap the updated data
As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//update new state agent size
h_xmachine_memory_A_moving_count += h_xmachine_memory_A_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
}
/* Shared memory size calculator for agent function */
int A_reverse_direction_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_reverse_direction
* Agent function prototype for reverse_direction function of A agent
*/
void A_reverse_direction(cudaStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_moving_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_moving_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_moving_temp = d_As;
d_As = d_As_moving;
d_As_moving = As_moving_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_moving_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_moving_count = 0;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_reverse_direction, A_reverse_direction_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_reverse_direction_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (reverse_direction)
//Reallocate : false
//Input :
//Output :
//Agent Output :
GPUFLAME_reverse_direction<<<g, b, sm_size, stream>>>(d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
	//check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_change_direction_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of reverse_direction agents in state change_direction will be exceeded moving working agents to next state in function reverse_direction\n");
exit(EXIT_FAILURE);
}
//append agents to next state list
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, append_A_Agents, no_sm, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
append_A_Agents<<<gridSize, blockSize, 0, stream>>>(d_As_change_direction, d_As, h_xmachine_memory_A_change_direction_count, h_xmachine_memory_A_count);
gpuErrchkLaunch();
//update new state agent size
h_xmachine_memory_A_change_direction_count += h_xmachine_memory_A_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
}
/* Shared memory size calculator for agent function */
int A_resume_movement_sm_size(int blockSize){
int sm_size;
sm_size = SM_START;
return sm_size;
}
/** A_resume_movement
* Agent function prototype for resume_movement function of A agent
*/
void A_resume_movement(cudaStream_t &stream){
int sm_size;
int blockSize;
int minGridSize;
int gridSize;
int state_list_size;
dim3 g; //grid for agent func
dim3 b; //block for agent func
//CHECK THE CURRENT STATE LIST COUNT IS NOT EQUAL TO 0
if (h_xmachine_memory_A_change_direction_count == 0)
{
return;
}
//SET SM size to 0 and save state list size for occupancy calculations
sm_size = SM_START;
state_list_size = h_xmachine_memory_A_change_direction_count;
//******************************** AGENT FUNCTION CONDITION *********************
//THERE IS NOT A FUNCTION CONDITION
//currentState maps to working list
xmachine_memory_A_list* As_change_direction_temp = d_As;
d_As = d_As_change_direction;
d_As_change_direction = As_change_direction_temp;
//set working count to current state count
h_xmachine_memory_A_count = h_xmachine_memory_A_change_direction_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_count, &h_xmachine_memory_A_count, sizeof(int)));
//set current state count to 0
h_xmachine_memory_A_change_direction_count = 0;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_change_direction_count, &h_xmachine_memory_A_change_direction_count, sizeof(int)));
//******************************** AGENT FUNCTION *******************************
//calculate the grid block size for main agent function
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, GPUFLAME_resume_movement, A_resume_movement_sm_size, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
b.x = blockSize;
g.x = gridSize;
sm_size = A_resume_movement_sm_size(blockSize);
//MAIN XMACHINE FUNCTION CALL (resume_movement)
//Reallocate : false
//Input :
//Output :
//Agent Output :
GPUFLAME_resume_movement<<<g, b, sm_size, stream>>>(d_As);
gpuErrchkLaunch();
//************************ MOVE AGENTS TO NEXT STATE ****************************
	//check the working agents won't exceed the buffer size in the new state list
if (h_xmachine_memory_A_moving_count+h_xmachine_memory_A_count > xmachine_memory_A_MAX){
printf("Error: Buffer size of resume_movement agents in state moving will be exceeded moving working agents to next state in function resume_movement\n");
exit(EXIT_FAILURE);
}
//append agents to next state list
cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &blockSize, append_A_Agents, no_sm, state_list_size);
gridSize = (state_list_size + blockSize - 1) / blockSize;
append_A_Agents<<<gridSize, blockSize, 0, stream>>>(d_As_moving, d_As, h_xmachine_memory_A_moving_count, h_xmachine_memory_A_count);
gpuErrchkLaunch();
//update new state agent size
h_xmachine_memory_A_moving_count += h_xmachine_memory_A_count;
gpuErrchk( cudaMemcpyToSymbol( d_xmachine_memory_A_moving_count, &h_xmachine_memory_A_moving_count, sizeof(int)));
}
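/* Host-side helpers that clear the host copy of a state's agent count; the matching
   device symbols are written back by the agent functions via cudaMemcpyToSymbol. */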
extern void reset_A_moving_count()
{
h_xmachine_memory_A_moving_count = 0;
}
extern void reset_A_change_direction_count()
{
h_xmachine_memory_A_change_direction_count = 0;
}
extern void reset_A_get_going_again_count()
{
h_xmachine_memory_A_get_going_again_count = 0;
}
|
77a31178680c9e29545904c971e929c86049d051.hip | // !!! This is a file automatically generated by hipify!!!
// --- Internal Includes ---
#include "dielectric.cuh"
#include "hittable.cuh"
namespace ray_tracer {
RT_DEVICE Dielectric::Dielectric(const Color& color, const float refractive_index) :
albedo_(color), refractive_index_(refractive_index)
{
}
RT_DEVICE bool Dielectric::scatter(hiprandState_t* rand_state, const Ray& ray_incident,
const HitRecord& rec, Color& attenuation, Ray& ray_scattered) const
{
const auto refractive_index = rec.front_face ? 1 / refractive_index_ : refractive_index_;
const auto unit_incident_ray_direction = unit_vector(ray_incident.direction());
// Incident ray angle calculations
const auto cos_incident = -dot(unit_incident_ray_direction, rec.normal);
const auto sin_incident = std::sqrt(std::abs(1 - cos_incident * cos_incident));
Vec3 scatter_direction;
// In case refraction is not possible or desired
if ((refractive_index * sin_incident > 1.0f) ||
reflectance(cos_incident, refractive_index) > random_unit(rand_state))
{
scatter_direction = reflect(unit_incident_ray_direction, rec.normal);
}
else
{
scatter_direction = refract(unit_incident_ray_direction, cos_incident, rec.normal, refractive_index);
}
ray_scattered = Ray(rec.hit_point, scatter_direction, ray_incident.time());
attenuation = albedo_;
return true;
}
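// Schlick's approximation of the Fresnel reflectance for the given incidence angle.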
RT_DEVICE float Dielectric::reflectance(const float cos_incident, const float refractive_index)
{
auto r0 = (1 - refractive_index) / (1 + refractive_index);
r0 *= r0;
return r0 + (1 - r0) * std::powf(1 - cos_incident, 5);
}
} // namespace ray_tracer
| 77a31178680c9e29545904c971e929c86049d051.cu | // --- Internal Includes ---
#include "dielectric.cuh"
#include "hittable.cuh"
namespace ray_tracer {
RT_DEVICE Dielectric::Dielectric(const Color& color, const float refractive_index) :
albedo_(color), refractive_index_(refractive_index)
{
}
RT_DEVICE bool Dielectric::scatter(curandState_t* rand_state, const Ray& ray_incident,
const HitRecord& rec, Color& attenuation, Ray& ray_scattered) const
{
const auto refractive_index = rec.front_face ? 1 / refractive_index_ : refractive_index_;
const auto unit_incident_ray_direction = unit_vector(ray_incident.direction());
// Incident ray angle calculations
const auto cos_incident = -dot(unit_incident_ray_direction, rec.normal);
const auto sin_incident = std::sqrt(std::abs(1 - cos_incident * cos_incident));
Vec3 scatter_direction;
// In case refraction is not possible or desired
if ((refractive_index * sin_incident > 1.0f) ||
reflectance(cos_incident, refractive_index) > random_unit(rand_state))
{
scatter_direction = reflect(unit_incident_ray_direction, rec.normal);
}
else
{
scatter_direction = refract(unit_incident_ray_direction, cos_incident, rec.normal, refractive_index);
}
ray_scattered = Ray(rec.hit_point, scatter_direction, ray_incident.time());
attenuation = albedo_;
return true;
}
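// Schlick's approximation of the Fresnel reflectance for the given incidence angle.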
RT_DEVICE float Dielectric::reflectance(const float cos_incident, const float refractive_index)
{
auto r0 = (1 - refractive_index) / (1 + refractive_index);
r0 *= r0;
return r0 + (1 - r0) * std::powf(1 - cos_incident, 5);
}
} // namespace ray_tracer
|
b2c4752d4ab988c48c8cab5ca9d6aeda2d3207cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello() {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
	printf("Hello world! from thread %d\n", tid);
}
int main(){
int NUMBER_OF_BLOCKS = 2;
int NUMBER_OF_THREADS = 10;
	hipLaunchKernelGGL(( hello), dim3(NUMBER_OF_BLOCKS), dim3(NUMBER_OF_THREADS), 0, 0, );
	hipDeviceSynchronize(); // wait for the kernel so its printf output is flushed before exit
	return 0;
} | b2c4752d4ab988c48c8cab5ca9d6aeda2d3207cc.cu | #include <stdio.h>
__global__ void hello() {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
	printf("Hello world! from thread %d\n", tid);
}
int main(){
int NUMBER_OF_BLOCKS = 2;
int NUMBER_OF_THREADS = 10;
	hello<<<NUMBER_OF_BLOCKS, NUMBER_OF_THREADS>>>();
	cudaDeviceSynchronize(); // wait for the kernel so its printf output is flushed before exit
	return 0;
} |
8a104bb20fdaf914b415b42a1da4ec1fe3a768be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "cuImage.h"
//namespace cusift {
///******************************* Defs and macros *****************************/
//// default width of descriptor histogram array
//static const int SIFT_DESCR_WIDTH = 4;
//// default number of bins per histogram in descriptor array
//static const int SIFT_DESCR_HIST_BINS = 8;
//// assumed gaussian blur for input image
//static const float SIFT_INIT_SIGMA = 0.5f;
//// width of border in which to ignore keypoints
//static const int SIFT_IMG_BORDER = 5;
//// maximum steps of keypoint interpolation before failure
//static const int SIFT_MAX_INTERP_STEPS = 5;
//// default number of bins in histogram for orientation assignment
//static const int SIFT_ORI_HIST_BINS = 36;
//// determines gaussian sigma for orientation assignment
//static const float SIFT_ORI_SIG_FCTR = 1.5f;
//// determines the radius of the region used in orientation assignment
//static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
//// orientation magnitude relative to max that results in new feature
//static const float SIFT_ORI_PEAK_RATIO = 0.8f;
//// determines the size of a single descriptor orientation histogram
//static const float SIFT_DESCR_SCL_FCTR = 3.f;
//// threshold on magnitude of elements of descriptor vector
//static const float SIFT_DESCR_MAG_THR = 0.2f;
//// factor used to convert floating-point descriptor to unsigned char
//static const float SIFT_INT_DESCR_FCTR = 512.f;
//static const int SIFT_FIXPT_SCALE = 1;
//}
//namespace cusift {
//void createInitialImage_gpu(const Mat &src, CudaImage &base, float sigma, bool doubleImageSize){
// int width = src.cols;
// int height = src.rows;
// if(!src.data){
// printf("input none data !");
// return;
// }
// Mat gray, gray_fpt;
// if( src.channels() == 3 || src.channels() == 4 )
// {
// cvtColor(src, gray, COLOR_BGR2GRAY);
// gray.convertTo(gray_fpt, DataType<float>::type, 1, 0);
// }
// else
// src.convertTo(gray_fpt, DataType<float>::type, 1, 0);
// //sigma different which is sqrt(1.6*1.6-0.5*0.5*4)
// float sig_diff;
// if( doubleImageSize )
// {
// sig_diff = sqrtf( ::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
// resize(gray_fpt, gray_fpt, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
// width = gray_fpt.cols;
// height = gray_fpt.rows;
// base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
// base.Download();
// //cuGaussianBlur(base,sig_diff);
// }
// else
// {
// sig_diff = sqrtf( ::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
// base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
// base.Download();
// //cuGaussianBlur(base,sig_diff);
// //GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
// }
//}
//}
#include"cuGlobal.h"
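// Only the declarations and stubs below are currently compiled; the OpenCV-based
// createInitialImage_gpu above is left commented out.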
namespace cv { namespace cuda { namespace device
{
namespace sift
{
void bindImgTex(PtrStepSzb img);
//void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
//extern void differenceImg_gpu(PtrStepSzf next,PtrStepSzf prev,PtrStepSzf diff);
//void differenceImg_gpu();
}
}}}
__global__ void differenceImg_gpu()
{
}
namespace cv { namespace cuda { namespace device
{
namespace sift
{
using namespace cv::cuda;
void differenceImg_gpu()
{
std::cout<<"static function "<<std::endl;
}
//void differenceImg_gpu(PtrStepSzf next,PtrStepSzf prev,PtrStepSzf diff)
//{
//}
}
}
}
}
| 8a104bb20fdaf914b415b42a1da4ec1fe3a768be.cu | //#include "cuImage.h"
//namespace cusift {
///******************************* Defs and macros *****************************/
//// default width of descriptor histogram array
//static const int SIFT_DESCR_WIDTH = 4;
//// default number of bins per histogram in descriptor array
//static const int SIFT_DESCR_HIST_BINS = 8;
//// assumed gaussian blur for input image
//static const float SIFT_INIT_SIGMA = 0.5f;
//// width of border in which to ignore keypoints
//static const int SIFT_IMG_BORDER = 5;
//// maximum steps of keypoint interpolation before failure
//static const int SIFT_MAX_INTERP_STEPS = 5;
//// default number of bins in histogram for orientation assignment
//static const int SIFT_ORI_HIST_BINS = 36;
//// determines gaussian sigma for orientation assignment
//static const float SIFT_ORI_SIG_FCTR = 1.5f;
//// determines the radius of the region used in orientation assignment
//static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
//// orientation magnitude relative to max that results in new feature
//static const float SIFT_ORI_PEAK_RATIO = 0.8f;
//// determines the size of a single descriptor orientation histogram
//static const float SIFT_DESCR_SCL_FCTR = 3.f;
//// threshold on magnitude of elements of descriptor vector
//static const float SIFT_DESCR_MAG_THR = 0.2f;
//// factor used to convert floating-point descriptor to unsigned char
//static const float SIFT_INT_DESCR_FCTR = 512.f;
//static const int SIFT_FIXPT_SCALE = 1;
//}
//namespace cusift {
//void createInitialImage_gpu(const Mat &src, CudaImage &base, float sigma, bool doubleImageSize){
// int width = src.cols;
// int height = src.rows;
// if(!src.data){
// printf("input none data !");
// return;
// }
// Mat gray, gray_fpt;
// if( src.channels() == 3 || src.channels() == 4 )
// {
// cvtColor(src, gray, COLOR_BGR2GRAY);
// gray.convertTo(gray_fpt, DataType<float>::type, 1, 0);
// }
// else
// src.convertTo(gray_fpt, DataType<float>::type, 1, 0);
// //sigma different which is sqrt(1.6*1.6-0.5*0.5*4)
// float sig_diff;
// if( doubleImageSize )
// {
// sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
// resize(gray_fpt, gray_fpt, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
// width = gray_fpt.cols;
// height = gray_fpt.rows;
// base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
// base.Download();
// //cuGaussianBlur(base,sig_diff);
// }
// else
// {
// sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
// base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
// base.Download();
// //cuGaussianBlur(base,sig_diff);
// //GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
// }
//}
//}
#include"cuGlobal.h"
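// Only the declarations and stubs below are currently compiled; the OpenCV-based
// createInitialImage_gpu above is left commented out.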
namespace cv { namespace cuda { namespace device
{
namespace sift
{
void bindImgTex(PtrStepSzb img);
//void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
//extern void differenceImg_gpu(PtrStepSzf next,PtrStepSzf prev,PtrStepSzf diff);
//void differenceImg_gpu();
}
}}}
__global__ void differenceImg_gpu()
{
}
namespace cv { namespace cuda { namespace device
{
namespace sift
{
using namespace cv::cuda;
void differenceImg_gpu()
{
std::cout<<"static function "<<std::endl;
}
//void differenceImg_gpu(PtrStepSzf next,PtrStepSzf prev,PtrStepSzf diff)
//{
//}
}
}
}
}
|
18f27549b70c3e1eb9fbde854724fbe816aa5b1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdafx.h"
#include "cuda_tensor.h"
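// NOTE: the window, stride and pad arguments arrive packed into single ints:
//   window = (window_h << 16) | window_w, stride = (stride_h << 16) | stride_w,
//   pad    = (pad_wl << 24) | (pad_wr << 16) | (pad_ht << 8) | pad_hb,
// matching the shift/mask unpacking at the top of each kernel below.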
__global__ static void forward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = ( width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = -INFINITY;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++,y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
if (in[src_i] > out[dst_i]) {
out[dst_i] = in[src_i];
indexes[dst_i] = src_i;
}
}
}
}
}
dst_i += threads;
}
}
__global__ static void forward_avgpool_kernel_nchw(float* out, const float* in, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = (width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = 0.0f;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++, y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
out[dst_i] += in[src_i];
}
}
}
}
out[dst_i] /= src_size;
dst_i += threads;
}
}
bool forward_avgpool(CudaTensor& output, const CudaTensor& input, int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_avgpool_kernel_nchw) , dim3(g), dim3(b) , 0, 0, out, in, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: forward_avgpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_avgpool_kernel_nchw) , dim3(g), dim3(b) , 0, 0, out, in, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: forward_avgpool failed!\n";
return false;
}
}
return true;
}
bool forward_maxpool(CudaTensor& output, const CudaTensor& input, int* indexes,
int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_maxpool_kernel_nchw), dim3(g), dim3(b), 0, 0, out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
		//TODO: make sure src_x and src_y are in the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
if (indexes[src_i] == dst_i)
out[dst_i] += in[src_i];
dst_i += threads;
}
}
bool backward_maxpool(CudaTensor& dx, const CudaTensor& dy, int* indexes,
int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: backward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: backward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_avgpool_kernel_nchw(float* out, const float* in, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
int w_size = stride_w * stride_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
		//TODO: make sure src_x and src_y are in the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
out[dst_i] += in[src_i] / w_size;
dst_i += threads;
}
}
bool backward_avgpool(CudaTensor& dx, const CudaTensor& dy, int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_avgpool_kernel_nchw) , dim3(g), dim3(b) , 0, 0, out, in, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: backward_avgpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_avgpool_kernel_nchw) , dim3(g), dim3(b) , 0, 0, out, in, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
			cerr << " Error: backward_avgpool failed!\n";
return false;
}
}
return true;
} | 18f27549b70c3e1eb9fbde854724fbe816aa5b1e.cu | #include "stdafx.h"
#include "cuda_tensor.h"
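// NOTE: the window, stride and pad arguments arrive packed into single ints:
//   window = (window_h << 16) | window_w, stride = (stride_h << 16) | stride_w,
//   pad    = (pad_wl << 24) | (pad_wr << 16) | (pad_ht << 8) | pad_hb,
// matching the shift/mask unpacking at the top of each kernel below.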
__global__ static void forward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = ( width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = -INFINITY;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++,y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
if (in[src_i] > out[dst_i]) {
out[dst_i] = in[src_i];
indexes[dst_i] = src_i;
}
}
}
}
}
dst_i += threads;
}
}
__global__ static void forward_avgpool_kernel_nchw(float* out, const float* in, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = (width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = 0.0f;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++, y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
out[dst_i] += in[src_i];
}
}
}
}
out[dst_i] /= src_size;
dst_i += threads;
}
}
bool forward_avgpool(CudaTensor& output, const CudaTensor& input, int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_avgpool_kernel_nchw <<<g, b >>> (out, in, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: forward_avgpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_avgpool_kernel_nchw <<<g, b >>> (out, in, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: forward_avgpool failed!\n";
return false;
}
}
return true;
}
bool forward_maxpool(CudaTensor& output, const CudaTensor& input, int* indexes,
int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_maxpool_kernel_nchw<<<g, b>>>(out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
		//TODO: make sure src_x and src_y are in the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
if (indexes[src_i] == dst_i)
out[dst_i] += in[src_i];
dst_i += threads;
}
}
bool backward_maxpool(CudaTensor& dx, const CudaTensor& dy, int* indexes,
int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: backward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: backward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_avgpool_kernel_nchw(float* out, const float* in, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
int w_size = stride_w * stride_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
//TODO: makesure src_x and src_y is in the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
out[dst_i] += in[src_i] / w_size;
dst_i += threads;
}
}
bool backward_avgpool(CudaTensor& dx, const CudaTensor& dy, int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_avgpool_kernel_nchw <<<g, b >>> (out, in, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: backward_avgpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_avgpool_kernel_nchw <<<g, b >>> (out, in, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
			cerr << " Error: backward_avgpool failed!\n";
return false;
}
}
return true;
} |
5d93832e04b2f32e851df94fbacfc0cd1382c83e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void sqrt_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = sqrtf(dy[i]);
}
} | 5d93832e04b2f32e851df94fbacfc0cd1382c83e.cu | #include "includes.h"
extern "C"
__global__ void sqrt_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = sqrtf(dy[i]);
}
} |
75d404824366de5ea46e7e5118a0098b072ad018.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<stdio.h>
// Kernel definition
__global__ void MatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
int j = threadIdx.y;
*C= *A + *B;
}
int main() {
	// Kernel invocation with one block of 10 * 10 * 1 threads
	int numBlocks = 1;
	// Use managed memory so the device kernel and the host printf see the same values;
	// the original passed addresses of host stack variables, which the kernel cannot dereference
	float *A, *B, *C;
	hipMallocManaged((void**)&A, sizeof(float));
	hipMallocManaged((void**)&B, sizeof(float));
	hipMallocManaged((void**)&C, sizeof(float));
	*A = 5; *B = 10;
	dim3 threadsPerBlock(10, 10);
	hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, A, B, C);
	hipDeviceSynchronize(); // wait for the kernel before reading C
	printf("%f", *C);
	hipFree(A); hipFree(B); hipFree(C);
	return 0;
} | 75d404824366de5ea46e7e5118a0098b072ad018.cu | #include<cuda_runtime.h>
#include<stdio.h>
// Kernel definition
__global__ void MatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
int j = threadIdx.y;
*C= *A + *B;
}
int main() {
	// Kernel invocation with one block of 10 * 10 * 1 threads
	int numBlocks = 1;
	// Use managed memory so the device kernel and the host printf see the same values;
	// the original passed addresses of host stack variables, which the kernel cannot dereference
	float *A, *B, *C;
	cudaMallocManaged((void**)&A, sizeof(float));
	cudaMallocManaged((void**)&B, sizeof(float));
	cudaMallocManaged((void**)&C, sizeof(float));
	*A = 5; *B = 10;
	dim3 threadsPerBlock(10, 10);
	MatAdd<<<numBlocks, threadsPerBlock>>>(A, B, C);
	cudaDeviceSynchronize(); // wait for the kernel before reading C
	printf("%f", *C);
	cudaFree(A); cudaFree(B); cudaFree(C);
	return 0;
} |
060061d8a6720ed826fed78027d9e44cedcbecea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <hiprand/hiprand_kernel.h> //hiprand device function
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
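  // Since cuBLAS expects column-major data while caffe stores row-major, A/B and
  // M/N are swapped below: computing C^T = op(B)^T * op(A)^T in column-major
  // memory yields exactly the row-major C = op(A) * op(B).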
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
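    // hipMemcpyDefault lets the runtime infer the copy direction from the pointer
    // attributes (unified virtual addressing), so one wrapper covers all cases.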
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<int>(const int n, const int alpha, const int *x,
int* y) {
}
template <>
void caffe_gpu_scale<unsigned>(const int n, const unsigned alpha, const unsigned *x,
unsigned* y) {
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
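// CUDA_KERNEL_LOOP expands to a grid-stride loop over the n elements, so each
// kernel below is independent of the exact grid size chosen by CAFFE_GET_BLOCKS.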
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// GPU ternary quantization: threshold X against delta, then compute the scaling factor alpha
template <typename Dtype>
__global__ void ternary_kernel(const int n, const Dtype delta, const Dtype* X, Dtype* Y){
CUDA_KERNEL_LOOP(index, n) {
const Dtype a = X[index];
Y[index] = (a>delta) - (a<-delta);
}
}
template <>
void caffe_gpu_ternary<int>(const int N, const int delta, const int* X, int* Y,int* alpha) {
// NOT IMPLEMENT
}
template <>
void caffe_gpu_ternary<unsigned>(const int N, const unsigned delta, const unsigned* X, unsigned* Y,unsigned* alpha) {
// NOT IMPLEMENT
}
template <>
void caffe_gpu_ternary<float>(const int N, const float delta, const float* X, float* Y,float* alpha) {
// NOT IMPLEMENT
//clock_t ed;
//double stime;
//ed=clock();
hipLaunchKernelGGL(( ternary_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, delta, X, Y);
const float* ternary_data=(const float*)Y;
float pa = 0;
caffe_gpu_dot(N, ternary_data, X, &pa);
float pb = 0;
caffe_gpu_dot(N, ternary_data, ternary_data, &pb);
*alpha = (pa) / ((pb) + 1e-6);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"quantize time is "<<stime*1000<<"ms";
//std::cout<<"pa="<<pa<<", pb="<<pb<<", alpha="<<*alpha<<std::endl;
}
template <>
void caffe_gpu_ternary<double>(const int N, const double delta, const double* X, double* Y,double* alpha) {
// NOT IMPLEMENT
hipLaunchKernelGGL(( ternary_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, delta, X, Y);
const double* ternary_data=(const double*)Y;
double pa = 0;
caffe_gpu_dot(N, ternary_data, X, &pa);
double pb = 0;
caffe_gpu_dot(N, ternary_data, ternary_data, &pb);
*alpha = (pa) / ((pb) + 1e-6);
//std::cout<<"pa="<<pa<<", pb="<<pb<<", alpha="<<*alpha<<std::endl;
}
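// Usage sketch (illustrative only; d_weights, d_ternary and n are hypothetical caller-side
// names, not part of this file). caffe_gpu_ternary maps each entry of X to {-1, 0, +1}
// using the threshold delta and then picks the alpha that minimizes ||X - alpha*T||^2,
// i.e. alpha = <T, X> / <T, T>:
//   float delta = 0.05f, alpha = 0.f;
//   caffe_gpu_ternary<float>(n, delta, d_weights, d_ternary, &alpha);
//   // d_ternary now holds {-1, 0, +1}; alpha * d_ternary approximates d_weights.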
template <typename Dtype>
__global__ void quantize_kernel(const int n,const Dtype fixed_value, const Dtype max_num, const Dtype min_num, const Dtype* X, Dtype* Y){
CUDA_KERNEL_LOOP(index, n){
Dtype x_q = X[index] / fixed_value;
// add a random offset drawn from uniform(-0.5, 0.5) so that the rounding below is stochastic
x_q+=Y[index];// Y has already been filled with the uniform random noise
x_q = (x_q > 0.0) ? floor(x_q + 0.5) : ceil(x_q - 0.5);
if (x_q >= max_num)
{
Y[index] = max_num - 1;
}else if (x_q <= min_num){
Y[index] = min_num + 1;
}else{
Y[index] = x_q;
}
}
}
// copy a single element from device memory back to the host
template<typename Dtype>
void caffe_gpu_getcudavalue(const Dtype* X,const int index, Dtype* result){
CUDA_CHECK(hipMemcpy(result, X+index, sizeof(Dtype), hipMemcpyDeviceToHost));
}
template<>
void caffe_gpu_quantizea<int>(const int count, const int* X, int* Y, int* fixed_point, int max_bits, bool calc_fixed_point){}
template<>
void caffe_gpu_quantizea<unsigned>(const int count, const unsigned* X, unsigned* Y, int* fixed_point, int max_bits, bool calc_fixed_point){}
template<>
void caffe_gpu_quantizea<double>(const int count, const double* X, double* Y, int* fixed_point, int max_bits, bool calc_fixed_point){
// derive the fixed-point position from the maximum absolute value
if(calc_fixed_point){
int max_index;
CUBLAS_CHECK(hipblasIdamax(Caffe::cublas_handle(),count, X, 1, &max_index));
double max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// the index returned by amax is 1-based
max_n=fabs(max_n);
if (max_n == 0){
*fixed_point = 0;
}else{
// compute the fixed-point position
*fixed_point = floor(log2(max_n)) - (max_bits - 1);
}
}
const double max_num = pow(2, max_bits);
const double min_num = -max_num;
const double fixed_value = pow(2, *fixed_point);
// fill Y with uniform(-0.5, 0.5) random numbers first; the kernel then uses them to dither the quantized X
caffe_gpu_rng_uniform(count,double(-0.5),double(0.5),Y);
// compute the quantized result
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, fixed_value,max_num,min_num, X, Y);
}
template<>
void caffe_gpu_quantizea<float>(const int count, const float* X, float* Y, int* fixed_point, int max_bits, bool calc_fixed_point){
// derive the fixed-point position from the maximum absolute value
//clock_t ed;
//double stime;
//ed=clock();
if(calc_fixed_point){
int max_index;
CUBLAS_CHECK(hipblasIsamax(Caffe::cublas_handle(),count, X, 1, &max_index));
float max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// the index returned by amax is 1-based
//std::cout<<"max_n="<<max_n;
//std::cout<<"before max value is "<<max_n << " in index : "<<max_index<<std::endl;
max_n=fabsf(max_n);
//std::cout<<", abs max_n="<<max_n<<std::endl;
if (max_n == 0){
*fixed_point = 0;
}else{
// compute the fixed-point position
*fixed_point = floor(log2(max_n)) - (max_bits - 1);
}
}
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"find max time is "<<stime*1000<<"ms";
const float max_num = pow(2, max_bits);
const float min_num = -max_num;
const float fixed_value = pow(2, *fixed_point);
//std::cout<<"fixed_point is "<<*fixed_point<<std::endl;
// fill Y with uniform(-0.5, 0.5) random numbers first; the kernel then uses them to dither the quantized X
//ed=clock();
caffe_gpu_rng_uniform(count,float(-0.5),float(0.5),Y);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"generate rand time is "<<stime*1000<<"ms";
// compute the quantized result
//ed=clock();
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, fixed_value,max_num,min_num, X, Y);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"quantize time is "<<stime*1000<<"ms";
}
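// Worked example (illustrative): with max_bits = 8 and max|X| = 3.7 the code above sets
// fixed_point = floor(log2(3.7)) - 7 = -6, so fixed_value = 2^-6 and a value 0.30 is
// quantized to round(0.30 / 2^-6 + u) = 19 or 20 depending on the uniform noise u in (-0.5, 0.5);
// Y stores these integer codes, not rescaled values. A hypothetical call
// (d_x, d_q and n are caller-side names):
//   int fp = 0;
//   caffe_gpu_quantizea<float>(n, d_x, d_q, &fp, 8, true);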
//Clip quantization
template <typename Dtype>
__global__ void quantize_kernel(const int n,Dtype* X, Dtype max_pos,Dtype max_neg){
CUDA_KERNEL_LOOP(index, n){
if(X[index]>max_pos){
X[index]=max_pos;
}
if(X[index]<max_neg){
X[index]=max_neg;
}
}
}
template<>
void caffe_gpu_quantizeb<int>(const int count, int* X, int max_bits){}
template<>
void caffe_gpu_quantizeb<unsigned>(const int count, unsigned* X, int max_bits){}
template<>
void caffe_gpu_quantizeb<float>(const int count, float* X, int max_bits){
// clip in place to the representable signed range [-2^(bits-1), 2^(bits-1)-1]
float max_pos=(float)pow(2,max_bits-1)-1;
float max_neg=-(float)pow(2,max_bits-1);
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X,max_pos,max_neg);
}
template<>
void caffe_gpu_quantizeb<double>(const int count, double* X, int max_bits){
// clip in place to the representable signed range [-2^(bits-1), 2^(bits-1)-1]
double max_pos=(double)pow(2,max_bits-1)-1;
double max_neg=-(double)pow(2,max_bits-1);
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X,max_pos,max_neg);
}
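// Note: quantizeb only clips in place to the signed range of a max_bits integer,
// e.g. max_bits = 8 keeps values inside [-128, 127]; it does not round, so it
// assumes X already holds integer-valued data.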
//scale clip
template <typename Dtype>
__global__ void round_kernel(const int n,Dtype* X){
CUDA_KERNEL_LOOP(index, n){
X[index] = (X[index] > 0.0) ? floor(X[index] + 0.5) : ceil(X[index] - 0.5);
}
}
template<>
void caffe_gpu_quantizec<int>(const int count, int* X, int max_bits){}
template<>
void caffe_gpu_quantizec<unsigned>(const int count, unsigned* X, int max_bits){}
template<>
void caffe_gpu_quantizec<float>(const int count, float* X, int max_bits){
// find the maximum absolute value and rescale into [-(2^(bits-1)-1), 2^(bits-1)-1], e.g. [-127, 127] for 8 bits (-128 is deliberately unused)
int max_index;
CUBLAS_CHECK(hipblasIsamax(Caffe::cublas_handle(),count, X, 1, &max_index));
float max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);//cublas1
max_n=fabsf(max_n);
//scale:scaler=127/max_n;
float dst_range=(float)pow(2,max_bits-1)-1;
float scaler=dst_range/max_n;
caffe_gpu_scale(count, scaler, X, X);
// round to the nearest integer
round_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X);
}
template<>
void caffe_gpu_quantizec<double>(const int count, double* X, int max_bits){
// find the maximum absolute value and rescale into [-(2^(bits-1)-1), 2^(bits-1)-1], e.g. [-127, 127] for 8 bits (-128 is deliberately unused)
int max_index;
CUBLAS_CHECK(hipblasIdamax(Caffe::cublas_handle(),count, X, 1, &max_index));
double max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);//cublas1
max_n=fabs(max_n); // use the double-precision fabs in the double specialization
//scale:scaler=127/max_n;
double dst_range=(double)pow(2,max_bits-1)-1;
double scaler=dst_range/max_n;
caffe_gpu_scale(count, scaler, X, X);
// round to the nearest integer
round_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X);
}
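// Worked example (illustrative): with max_bits = 8 and max|X| = 0.5 the scale factor is
// 127 / 0.5 = 254, so x = 0.25 becomes round(0.25 * 254) = 64. The scale is not returned,
// so a caller that needs to dequantize has to recompute 127 / max|X| itself.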
//meanstd
__global__ void block_Ssum(const int n, const float *X, float *result)
{
extern __shared__ float sdata[];
sdata[threadIdx.x] = 0;
//__syncthreads();// wait until every thread has loaded its elements into shared memory
CUDA_KERNEL_LOOP(i, n){
sdata[threadIdx.x] += X[i];
}
__syncthreads();// wait until every thread has finished accumulating its elements into shared memory
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
if(threadIdx.x == 0)
{
result[blockIdx.x] = sdata[0];
}
}
__global__ void block_Dsum(const int n, const double *X, double *result)
{
extern __shared__ double sdata1[];
sdata1[threadIdx.x] = 0;
__syncthreads();// make sure every thread has initialized its shared-memory slot
CUDA_KERNEL_LOOP(i, n){
sdata1[threadIdx.x] += X[i];
}
__syncthreads();// wait until every thread has finished accumulating its elements into shared memory
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata1[threadIdx.x] += sdata1[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
if(threadIdx.x == 0)
{
result[blockIdx.x] = sdata1[0];
}
}
template<class DType>
__global__ void block_sum(const DType *input,DType *per_block_results,const size_t n)
{
extern __shared__ DType sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory: each thread copies one element from global memory
DType x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();// wait until every thread has loaded its element into shared memory
// contiguous range pattern: halve the active range each step; threadIdx.x is the offset inside the block, i is the global offset
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result: each block stores its partial sum
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
template <>
void caffe_gpu_sum<int>(const int n, const int* x, int* y){}
template <>
void caffe_gpu_sum<unsigned>(const int n, const unsigned* x, unsigned* y){}
template <>
void caffe_gpu_sum<float>(const int n, const float* x, float* y){
const int block_size = CAFFE_CUDA_NUM_THREADS;//
int num_elements = n;
int num_blocks = CAFFE_GET_BLOCKS(n);
float *dev_input_array = 0;
float *dev_block_sums = 0;//
while(num_elements > block_size)
{
if(dev_block_sums == 0)//
{
dev_input_array = (float*)x;
}
else //
{
//
if(dev_input_array != x){
CUDA_CHECK(hipFree(dev_input_array));
}
dev_input_array = dev_block_sums;
}
//num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
//
CUDA_CHECK(hipMalloc((void**)&dev_block_sums, sizeof(float) * (num_blocks )));
// launch one kernel to compute, per-block, a partial sum//
hipLaunchKernelGGL(( block_Ssum), dim3(num_blocks),dim3(block_size), block_size*sizeof(float), 0, num_elements,dev_input_array, dev_block_sums);
//
num_elements = num_blocks;
//
num_blocks = CAFFE_GET_BLOCKS(num_blocks);
}
float* res=0;
CUDA_CHECK(hipMalloc((void**)&res, sizeof(float)));
float result=0;
//
hipLaunchKernelGGL(( block_Ssum), dim3(1),dim3(num_elements),num_elements*sizeof(float), 0, num_elements,dev_block_sums, res );
CUDA_CHECK(hipMemcpy(&result, res, sizeof(float), hipMemcpyDeviceToHost));
//
CUDA_CHECK(hipFree(res));
CUDA_CHECK(hipFree(dev_block_sums));
*y=result;
}
template <>
void caffe_gpu_sum<double>(const int n, const double* x, double* y){
const int block_size = CAFFE_CUDA_NUM_THREADS;//
int num_elements = n;
int num_blocks = CAFFE_GET_BLOCKS(n);
double *dev_input_array = 0;
double *dev_block_sums = 0;//
while(num_elements > block_size)
{
if(dev_block_sums == 0)//
{
dev_input_array = (double*)x;
}
else //
{
//
if(dev_input_array != x){
CUDA_CHECK(hipFree(dev_input_array));
}
dev_input_array = dev_block_sums;
}
//num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
//
CUDA_CHECK(hipMalloc((void**)&dev_block_sums, sizeof(double) * (num_blocks )));
// launch one kernel to compute, per-block, a partial sum//
hipLaunchKernelGGL(( block_Dsum), dim3(num_blocks),dim3(block_size),block_size*sizeof(double), 0, num_elements,dev_input_array, dev_block_sums);
//
num_elements = num_blocks;
//
num_blocks = CAFFE_GET_BLOCKS(num_blocks);
}
double* res=0;
double result=0;
CUDA_CHECK(hipMalloc((void**)&res, sizeof(double)));
//
hipLaunchKernelGGL(( block_Dsum), dim3(1),dim3(num_elements),num_elements*sizeof(double), 0, num_elements,dev_block_sums, res );
CUDA_CHECK(hipMemcpy(&result, res, sizeof(double), hipMemcpyDeviceToHost));
//
CUDA_CHECK(hipFree(res));
//
CUDA_CHECK(hipFree(dev_block_sums));
*y=result;
}
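// The two specializations above reduce in passes: each block_Ssum/block_Dsum launch folds a
// block-sized slice into one shared-memory partial sum, and the loop repeats until the
// remaining partial sums fit in a single block. caffe_gpu_meanstd below uses thrust::reduce
// instead, which would also work here; a minimal sketch (x and n as in the float version):
//   thrust::device_ptr<const float> p(x);
//   float s = thrust::reduce(p, p + n, 0.f, thrust::plus<float>());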
template<>
void caffe_gpu_meanstd<int>(const int count, const int* X, int& mean, int& std){}
template<>
void caffe_gpu_meanstd<unsigned>(const int count, const unsigned* X, unsigned& mean, unsigned& std){}
template<>
void caffe_gpu_meanstd<float>(const int count, const float* X, float& mean, float& stds){
//calc mean
float total;
//CUBLAS_CHECK(cublasSsum(Caffe::cublas_handle(), count, X, 1, &total));
//float X0=0,Xc=0;
//caffe_gpu_getcudavalue(X,0, &X0);
//caffe_gpu_getcudavalue(X,count-1, &Xc);
//LOG(INFO)<<"count="<<count<<",X[0]="<<X0<<",X[count-1]="<<Xc;
//LOG(INFO)<<"calc sum by GPU!";
//caffe_gpu_sum(count,X,&total);
thrust::device_ptr<const float> dev_ptr(X);
total=thrust::reduce(dev_ptr, dev_ptr + size_t(count), (float)0, thrust::plus<float>());
mean=total/count;
//LOG(INFO)<<"GPU sum is "<<total;
//calc std
//LOG(INFO)<<"calc square_sum by GPU!";
caffe_gpu_dot(count, X, X,&total);
stds=sqrt(total/count-pow(mean,2));
//LOG(INFO)<<"GPU mean="<<mean<<",std="<<stds;
}
template<>
void caffe_gpu_meanstd<double>(const int count, const double* X, double& mean, double& stds){
//calc mean
double total;
//CUBLAS_CHECK(cublasDsum(Caffe::cublas_handle(), count, X, 1, &total));
thrust::device_ptr<const double> dev_ptr(X);
total=thrust::reduce(dev_ptr, dev_ptr + size_t(count), (double)0, thrust::plus<double>());
mean=total/count;
//calc std
caffe_gpu_dot(count, X, X,&total);
stds=sqrt(total/count-pow(mean,2));
//LOG(INFO)<<"GPU mean="<<mean<<",std="<<stds;
}
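// The std above relies on the identity Var(X) = E[X^2] - (E[X])^2, with E[X^2] taken from
// caffe_gpu_dot(count, X, X, &total) / count. This is the biased (population) estimate and
// can lose precision when |mean| is much larger than the spread.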
//ulq
//ULQ: Q = Round(Clip((F - delta) * alpha + 0.5)), where alpha here is the reciprocal 1/alpha so the kernel multiplies instead of divides
// clip range: [1-2^(k-1), 2^(k-1)]
template <typename Dtype>
__global__ void ulq_kernel(const int n,const Dtype mean_old,const Dtype sigma_old,const Dtype maxs,const Dtype mins,const Dtype* X,Dtype* Y){
CUDA_KERNEL_LOOP(index, n){
//scale
Y[index]=(X[index]-mean_old)*sigma_old+0.5;
//clip
if(Y[index]>maxs){Y[index]=maxs;}
if(Y[index]<mins){Y[index]=mins;}
//round
Y[index] = (Y[index] > 0.0) ? floor(Y[index] + 0.5) : ceil(Y[index] - 0.5);
// map the quantized value back to the original range
// so the forward/backward passes keep operating at a consistent scale
Y[index]=((Y[index]-0.5)/sigma_old)+mean_old;
}
}
template<>
void caffe_gpu_ulq<int>(const int count, const int mean_old, const int sigma_old,const int* X, int* Y, int max_bits){}
template<>
void caffe_gpu_ulq<unsigned>(const int count, const unsigned mean_old, const unsigned sigma_old,const unsigned* X, unsigned* Y, int max_bits){}
template<>
void caffe_gpu_ulq<float>(const int count, const float mean_old, const float sigma_old,const float* X, float* Y, int max_bits){
float mins=1-pow(2,max_bits-1);
float maxs=pow(2,max_bits-1);
ulq_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,mean_old,sigma_old,maxs,mins,X,Y);
}
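// Worked example (illustrative): mean_old/sigma_old act as the shift delta and the
// reciprocal step 1/alpha of the ULQ formula above. With max_bits = 2 the clipped codes lie
// in [-1, 2]; e.g. mean_old = 0, sigma_old = 4 maps x = 0.3 to round(0.3 * 4 + 0.5) = 2,
// which is then mapped back to (2 - 0.5) / 4 = 0.375, so downstream layers keep working
// in the original scale.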
template<>
void caffe_gpu_ulq<double>(const int count, const double mean_old, const double sigma_old,const double* X, double* Y, int max_bits){}
} // namespace caffe
| 060061d8a6720ed826fed78027d9e44cedcbecea.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <curand_kernel.h> //curand device function
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<int>(const int n, const int alpha, const int *x,
int* y) {
}
template <>
void caffe_gpu_scale<unsigned>(const int n, const unsigned alpha, const unsigned *x,
unsigned* y) {
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// GPU ternary quantization: threshold X against delta, then compute the scaling factor alpha
template <typename Dtype>
__global__ void ternary_kernel(const int n, const Dtype delta, const Dtype* X, Dtype* Y){
CUDA_KERNEL_LOOP(index, n) {
const Dtype a = X[index];
Y[index] = (a>delta) - (a<-delta);
}
}
template <>
void caffe_gpu_ternary<int>(const int N, const int delta, const int* X, int* Y,int* alpha) {
// NOT IMPLEMENT
}
template <>
void caffe_gpu_ternary<unsigned>(const int N, const unsigned delta, const unsigned* X, unsigned* Y,unsigned* alpha) {
// NOT IMPLEMENT
}
template <>
void caffe_gpu_ternary<float>(const int N, const float delta, const float* X, float* Y,float* alpha) {
// NOT IMPLEMENT
//clock_t ed;
//double stime;
//ed=clock();
ternary_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, delta, X, Y);
const float* ternary_data=(const float*)Y;
float pa = 0;
caffe_gpu_dot(N, ternary_data, X, &pa);
float pb = 0;
caffe_gpu_dot(N, ternary_data, ternary_data, &pb);
*alpha = (pa) / ((pb) + 1e-6);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"quantize time is "<<stime*1000<<"ms";
//std::cout<<"pa="<<pa<<", pb="<<pb<<", alpha="<<*alpha<<std::endl;
}
template <>
void caffe_gpu_ternary<double>(const int N, const double delta, const double* X, double* Y,double* alpha) {
// NOT IMPLEMENT
ternary_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, delta, X, Y);
const double* ternary_data=(const double*)Y;
double pa = 0;
caffe_gpu_dot(N, ternary_data, X, &pa);
double pb = 0;
caffe_gpu_dot(N, ternary_data, ternary_data, &pb);
*alpha = (pa) / ((pb) + 1e-6);
//std::cout<<"pa="<<pa<<", pb="<<pb<<", alpha="<<*alpha<<std::endl;
}
template <typename Dtype>
__global__ void quantize_kernel(const int n,const Dtype fixed_value, const Dtype max_num, const Dtype min_num, const Dtype* X, Dtype* Y){
CUDA_KERNEL_LOOP(index, n){
Dtype x_q = X[index] / fixed_value;
// create a rand N satisfy uniform(-0.5,0.5)
x_q+=Y[index];// add the random noise to x_q; Y has already been initialized with uniform random numbers
x_q = (x_q > 0.0) ? floor(x_q + 0.5) : ceil(x_q - 0.5);
if (x_q >= max_num)
{
Y[index] = max_num - 1;
}else if (x_q <= min_num){
Y[index] = min_num + 1;
}else{
Y[index] = x_q;
}
}
}
// copy a single element from device memory back to the host
template<typename Dtype>
void caffe_gpu_getcudavalue(const Dtype* X,const int index, Dtype* result){
CUDA_CHECK(cudaMemcpy(result, X+index, sizeof(Dtype), cudaMemcpyDeviceToHost));
}
template<>
void caffe_gpu_quantizea<int>(const int count, const int* X, int* Y, int* fixed_point, int max_bits, bool calc_fixed_point){}
template<>
void caffe_gpu_quantizea<unsigned>(const int count, const unsigned* X, unsigned* Y, int* fixed_point, int max_bits, bool calc_fixed_point){}
template<>
void caffe_gpu_quantizea<double>(const int count, const double* X, double* Y, int* fixed_point, int max_bits, bool calc_fixed_point){
// derive the fixed-point position from the maximum absolute value
if(calc_fixed_point){
int max_index;
CUBLAS_CHECK(cublasIdamax(Caffe::cublas_handle(),count, X, 1, &max_index));
double max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// cuBLAS amax indices are 1-based
max_n=fabs(max_n);
if (max_n == 0){
*fixed_point = 0;
}else{
// compute the fixed-point position
*fixed_point = floor(log2(max_n)) - (max_bits - 1);
}
}
const double max_num = pow(2, max_bits);
const double min_num = -max_num;
const double fixed_value = pow(2, *fixed_point);
// fill Y with uniform(-0.5, 0.5) random numbers first; the kernel then uses them to dither the quantized X
caffe_gpu_rng_uniform(count,double(-0.5),double(0.5),Y);
// compute the quantized result
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, fixed_value,max_num,min_num, X, Y);
}
template<>
void caffe_gpu_quantizea<float>(const int count, const float* X, float* Y, int* fixed_point, int max_bits, bool calc_fixed_point){
// derive the fixed-point position from the maximum absolute value
//clock_t ed;
//double stime;
//ed=clock();
if(calc_fixed_point){
int max_index;
CUBLAS_CHECK(cublasIsamax(Caffe::cublas_handle(),count, X, 1, &max_index));
float max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// cuBLAS amax indices are 1-based
//std::cout<<"max_n="<<max_n;
//std::cout<<"before max value is "<<max_n << " in index : "<<max_index<<std::endl;
max_n=fabsf(max_n);
//std::cout<<", abs max_n="<<max_n<<std::endl;
if (max_n == 0){
*fixed_point = 0;
}else{
// compute the fixed-point position
*fixed_point = floor(log2(max_n)) - (max_bits - 1);
}
}
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"find max time is "<<stime*1000<<"ms";
const float max_num = pow(2, max_bits);
const float min_num = -max_num;
const float fixed_value = pow(2, *fixed_point);
//std::cout<<"fixed_point is "<<*fixed_point<<std::endl;
// fill Y with uniform(-0.5, 0.5) random numbers first; the kernel then uses them to dither the quantized X
//ed=clock();
caffe_gpu_rng_uniform(count,float(-0.5),float(0.5),Y);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"generate rand time is "<<stime*1000<<"ms";
// compute the quantized result
//ed=clock();
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, fixed_value,max_num,min_num, X, Y);
//stime=(double)(clock() - ed)/CLOCKS_PER_SEC;
//LOG(INFO)<<"quantize time is "<<stime*1000<<"ms";
}
//Clip quantization
template <typename Dtype>
__global__ void quantize_kernel(const int n,Dtype* X, Dtype max_pos,Dtype max_neg){
CUDA_KERNEL_LOOP(index, n){
if(X[index]>max_pos){
X[index]=max_pos;
}
if(X[index]<max_neg){
X[index]=max_neg;
}
}
}
template<>
void caffe_gpu_quantizeb<int>(const int count, int* X, int max_bits){}
template<>
void caffe_gpu_quantizeb<unsigned>(const int count, unsigned* X, int max_bits){}
template<>
void caffe_gpu_quantizeb<float>(const int count, float* X, int max_bits){
// clip in place to the representable signed range [-2^(bits-1), 2^(bits-1)-1]
float max_pos=(float)pow(2,max_bits-1)-1;
float max_neg=-(float)pow(2,max_bits-1);
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X,max_pos,max_neg);
}
template<>
void caffe_gpu_quantizeb<double>(const int count, double* X, int max_bits){
// clip in place to the representable signed range [-2^(bits-1), 2^(bits-1)-1]
double max_pos=(double)pow(2,max_bits-1)-1;
double max_neg=-(double)pow(2,max_bits-1);
quantize_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X,max_pos,max_neg);
}
//scale clip
template <typename Dtype>
__global__ void round_kernel(const int n,Dtype* X){
CUDA_KERNEL_LOOP(index, n){
X[index] = (X[index] > 0.0) ? floor(X[index] + 0.5) : ceil(X[index] - 0.5);
}
}
template<>
void caffe_gpu_quantizec<int>(const int count, int* X, int max_bits){}
template<>
void caffe_gpu_quantizec<unsigned>(const int count, unsigned* X, int max_bits){}
template<>
void caffe_gpu_quantizec<float>(const int count, float* X, int max_bits){
// find the maximum absolute value and rescale into [-(2^(bits-1)-1), 2^(bits-1)-1], e.g. [-127, 127] for 8 bits (-128 is deliberately unused)
int max_index;
CUBLAS_CHECK(cublasIsamax(Caffe::cublas_handle(),count, X, 1, &max_index));
float max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// cuBLAS amax indices are 1-based
max_n=fabsf(max_n);
//scale:scaler=127/max_n;
float dst_range=(float)pow(2,max_bits-1)-1;
float scaler=dst_range/max_n;
caffe_gpu_scale(count, scaler, X, X);
// round to the nearest integer
round_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X);
}
template<>
void caffe_gpu_quantizec<double>(const int count, double* X, int max_bits){
// find the maximum absolute value and rescale into [-(2^(bits-1)-1), 2^(bits-1)-1], e.g. [-127, 127] for 8 bits (-128 is deliberately unused)
int max_index;
CUBLAS_CHECK(cublasIdamax(Caffe::cublas_handle(),count, X, 1, &max_index));
double max_n;
caffe_gpu_getcudavalue(X,max_index-1,&max_n);// cuBLAS amax indices are 1-based
max_n=fabs(max_n); // use the double-precision fabs in the double specialization
//scale:scaler=127/max_n;
double dst_range=(double)pow(2,max_bits-1)-1;
double scaler=dst_range/max_n;
caffe_gpu_scale(count, scaler, X, X);
// round to the nearest integer
round_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,X);
}
//meanstd
__global__ void block_Ssum(const int n, const float *X, float *result)
{
extern __shared__ float sdata[];
sdata[threadIdx.x] = 0;
//__syncthreads();// wait until every thread has loaded its elements into shared memory
CUDA_KERNEL_LOOP(i, n){
sdata[threadIdx.x] += X[i];
}
__syncthreads();// wait until every thread has finished accumulating its elements into shared memory
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
if(threadIdx.x == 0)
{
result[blockIdx.x] = sdata[0];
}
}
__global__ void block_Dsum(const int n, const double *X, double *result)
{
extern __shared__ double sdata1[];
sdata1[threadIdx.x] = 0;
__syncthreads();// make sure every thread has initialized its shared-memory slot
CUDA_KERNEL_LOOP(i, n){
sdata1[threadIdx.x] += X[i];
}
__syncthreads();// wait until every thread has finished accumulating its elements into shared memory
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata1[threadIdx.x] += sdata1[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
if(threadIdx.x == 0)
{
result[blockIdx.x] = sdata1[0];
}
}
template<class DType>
__global__ void block_sum(const DType *input,DType *per_block_results,const size_t n)
{
extern __shared__ DType sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory: each thread copies one element from global memory
DType x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();// wait until every thread has loaded its element into shared memory
// contiguous range pattern: halve the active range each step; threadIdx.x is the offset inside the block, i is the global offset
for(int offset = blockDim.x / 2;offset > 0;offset >>= 1){
if(threadIdx.x < offset)// only the lower half of the active threads do work in this step
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result: each block stores its partial sum
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
template <>
void caffe_gpu_sum<int>(const int n, const int* x, int* y){}
template <>
void caffe_gpu_sum<unsigned>(const int n, const unsigned* x, unsigned* y){}
template <>
void caffe_gpu_sum<float>(const int n, const float* x, float* y){
const int block_size = CAFFE_CUDA_NUM_THREADS;// threads per block
int num_elements = n;
int num_blocks = CAFFE_GET_BLOCKS(n);
float *dev_input_array = 0;
float *dev_block_sums = 0;// one partial sum per block
while(num_elements > block_size)
{
if(dev_block_sums == 0)// first pass: reduce the original input
{
dev_input_array = (float*)x;
}
else // every pass after the first reduces the previous partial sums
{
// free every intermediate buffer except the original input
if(dev_input_array != x){
CUDA_CHECK(cudaFree(dev_input_array));
}
dev_input_array = dev_block_sums;
}
//num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
// allocate memory for the per-block partial sums
CUDA_CHECK(cudaMalloc((void**)&dev_block_sums, sizeof(float) * (num_blocks )));
// launch one kernel to compute, per-block, a partial sum
block_Ssum<<<num_blocks,block_size, block_size*sizeof(float)>>>(num_elements,dev_input_array, dev_block_sums);
// each block produced one result, so the number of remaining elements equals the number of blocks
num_elements = num_blocks;
// block count for the next pass
num_blocks = CAFFE_GET_BLOCKS(num_blocks);
}
float* res=0;
CUDA_CHECK(cudaMalloc((void**)&res, sizeof(float)));
float result=0;
// the remaining partial sums now fit in a single block, so finish with one final block reduction
block_Ssum<<<1,num_elements,num_elements*sizeof(float)>>>(num_elements,dev_block_sums, res );
CUDA_CHECK(cudaMemcpy(&result, res, sizeof(float), cudaMemcpyDeviceToHost));
// free the buffers from the final pass
CUDA_CHECK(cudaFree(res));
CUDA_CHECK(cudaFree(dev_block_sums));
*y=result;
}
template <>
void caffe_gpu_sum<double>(const int n, const double* x, double* y){
const int block_size = CAFFE_CUDA_NUM_THREADS;// threads per block
int num_elements = n;
int num_blocks = CAFFE_GET_BLOCKS(n);
double *dev_input_array = 0;
double *dev_block_sums = 0;// one partial sum per block
while(num_elements > block_size)
{
if(dev_block_sums == 0)// first pass: reduce the original input
{
dev_input_array = (double*)x;
}
else // every pass after the first reduces the previous partial sums
{
// free every intermediate buffer except the original input
if(dev_input_array != x){
CUDA_CHECK(cudaFree(dev_input_array));
}
dev_input_array = dev_block_sums;
}
//num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
// allocate memory for the per-block partial sums
CUDA_CHECK(cudaMalloc((void**)&dev_block_sums, sizeof(double) * (num_blocks )));
// launch one kernel to compute, per-block, a partial sum
block_Dsum<<<num_blocks,block_size,block_size*sizeof(double)>>>(num_elements,dev_input_array, dev_block_sums);
// each block produced one result, so the number of remaining elements equals the number of blocks
num_elements = num_blocks;
// block count for the next pass
num_blocks = CAFFE_GET_BLOCKS(num_blocks);
}
double* res=0;
double result=0;
CUDA_CHECK(cudaMalloc((void**)&res, sizeof(double)));
// the remaining partial sums now fit in a single block, so finish with one final block reduction
block_Dsum<<<1,num_elements,num_elements*sizeof(double)>>>(num_elements,dev_block_sums, res );
CUDA_CHECK(cudaMemcpy(&result, res, sizeof(double), cudaMemcpyDeviceToHost));
// free the result buffer of the final pass
CUDA_CHECK(cudaFree(res));
// free the last partial-sum buffer
CUDA_CHECK(cudaFree(dev_block_sums));
*y=result;
}
template<>
void caffe_gpu_meanstd<int>(const int count, const int* X, int& mean, int& std){}
template<>
void caffe_gpu_meanstd<unsigned>(const int count, const unsigned* X, unsigned& mean, unsigned& std){}
template<>
void caffe_gpu_meanstd<float>(const int count, const float* X, float& mean, float& stds){
//calc mean
float total;
//CUBLAS_CHECK(cublasSsum(Caffe::cublas_handle(), count, X, 1, &total));
//float X0=0,Xc=0;
//caffe_gpu_getcudavalue(X,0, &X0);
//caffe_gpu_getcudavalue(X,count-1, &Xc);
//LOG(INFO)<<"count="<<count<<",X[0]="<<X0<<",X[count-1]="<<Xc;
//LOG(INFO)<<"calc sum by GPU!";
//caffe_gpu_sum(count,X,&total);
thrust::device_ptr<const float> dev_ptr(X);
total=thrust::reduce(dev_ptr, dev_ptr + size_t(count), (float)0, thrust::plus<float>());
mean=total/count;
//LOG(INFO)<<"GPU sum is "<<total;
//calc std
//LOG(INFO)<<"calc square_sum by GPU!";
caffe_gpu_dot(count, X, X,&total);
stds=sqrt(total/count-pow(mean,2));
//LOG(INFO)<<"GPU mean="<<mean<<",std="<<stds;
}
template<>
void caffe_gpu_meanstd<double>(const int count, const double* X, double& mean, double& stds){
//calc mean
double total;
//CUBLAS_CHECK(cublasDsum(Caffe::cublas_handle(), count, X, 1, &total));
thrust::device_ptr<const double> dev_ptr(X);
total=thrust::reduce(dev_ptr, dev_ptr + size_t(count), (double)0, thrust::plus<double>());
mean=total/count;
//calc std
caffe_gpu_dot(count, X, X,&total);
stds=sqrt(total/count-pow(mean,2));
//LOG(INFO)<<"GPU mean="<<mean<<",std="<<stds;
}
//ulq
//ULQ: Q = Round(Clip((F - delta) * alpha + 0.5)), where alpha here is the reciprocal 1/alpha so the kernel multiplies instead of divides
// [1-2^(k-1),2^(k-1)]
template <typename Dtype>
__global__ void ulq_kernel(const int n,const Dtype mean_old,const Dtype sigma_old,const Dtype maxs,const Dtype mins,const Dtype* X,Dtype* Y){
CUDA_KERNEL_LOOP(index, n){
//scale
Y[index]=(X[index]-mean_old)*sigma_old+0.5;
//clip
if(Y[index]>maxs){Y[index]=maxs;}
if(Y[index]<mins){Y[index]=mins;}
//round
Y[index] = (Y[index] > 0.0) ? floor(Y[index] + 0.5) : ceil(Y[index] - 0.5);
// map the quantized value back to the original range
// so the forward/backward passes keep operating at a consistent scale
Y[index]=((Y[index]-0.5)/sigma_old)+mean_old;
}
}
template<>
void caffe_gpu_ulq<int>(const int count, const int mean_old, const int sigma_old,const int* X, int* Y, int max_bits){}
template<>
void caffe_gpu_ulq<unsigned>(const int count, const unsigned mean_old, const unsigned sigma_old,const unsigned* X, unsigned* Y, int max_bits){}
template<>
void caffe_gpu_ulq<float>(const int count, const float mean_old, const float sigma_old,const float* X, float* Y, int max_bits){
float mins=1-pow(2,max_bits-1);
float maxs=pow(2,max_bits-1);
ulq_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count,mean_old,sigma_old,maxs,mins,X,Y);
}
template<>
void caffe_gpu_ulq<double>(const int count, const double mean_old, const double sigma_old,const double* X, double* Y, int max_bits){}
} // namespace caffe
|
932a9de2e42b77d089907bb4549dd7247398a031.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
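// The kernel above is a separable bicubic filter: for each output pixel it gathers a 4x4
// input neighbourhood, runs cubic_interp1d along x for each of the four rows, and then once
// more along y over the four row results, i.e.
//   output(y, x) = sum_{i,j in 0..3} w_y[i] * w_x[j] * input(in_y - 1 + i, in_x - 1 + j)
// with the weights implied by the cubic convolution kernel inside cubic_interp1d.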
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// Special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
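// One thread per output (y, x) location; the batch and channel loops run inside the
// kernel, so the grid only has to cover output_height * output_width elements.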
const int max_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_output_elements, max_threads)),
dim3(max_threads),
0,
stream,
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
} // namespace native
} // namespace at
| 932a9de2e42b77d089907bb4549dd7247398a031.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
const int max_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_output_elements, max_threads),
max_threads,
0,
stream>>>(
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
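// One thread per grad_output (y, x) location; neighbouring threads scatter into
// overlapping 4x4 input regions, so upsample_increment_value_bounded is relied on
// to accumulate safely (presumably via atomic adds).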
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
} // namespace native
} // namespace at
|
0c05fb1b2fda94512740af8c18293a95bb185c93.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHNumerics.cuh>
#include <THH/THH.h>
#include <hip/hip_runtime.h>
//#define DEBUG
// calculate the IoU of a single box against another box
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// (lt), (rb)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
__global__
// boxes1 : [N x 4]
// boxes2 : [M x 4]
// ious : [N x M]
void calc_ious_kernel(const int N_img, const float4 *box1, const int *box1_offsets,
const int M, const float4 *boxes2, float *ious) {
// launch N_img blocks
const int img = blockIdx.x;
// each block, i will run over the box1_N[i] source and M target boxes
// generating box1_N[i] x M outputs
// alias to start of boxes for this image
const float4 *b1 = &box1[box1_offsets[img]];
if (threadIdx.x == 0) {
//printf("offset for img %d : %d\n", img, box1_offsets[img]);
}
// number of boxes for this image from offsets
int N = box1_offsets[img+1] - box1_offsets[img];
for (int i = 0; i < N; ++i) {
// if (threadIdx.x == 0) printf("i : %d\n", i);
const float4 source = b1[i];
// for each source, loop over targets
for (int j = threadIdx.x; j < M; j += blockDim.x) {
const float4 target = boxes2[j];
float iou = calc_single_iou(source, target);
// store the calculated IoU in the correct spot
int out_idx = box1_offsets[img] * M + i * M + j;
ious[out_idx] = iou;
}
}
}
__device__
void reduce_val_idx(int N, volatile float *vals, volatile int *idx) {
// naive: single thread for now
if (threadIdx.x == 0) {
float max_val = vals[0];
int max_idx = idx[0];
for (int i = 1; i < N; ++i) {
if (vals[i] > max_val) {
max_val = vals[i];
max_idx = idx[i];
}
}
vals[0] = max_val;
idx[0] = max_idx;
}
}
/**
* perform remaining parts, storing temporary values in global workspace
* workspace needs N_img * M values, each of 8 bytes (float, int)
**/
template <int BLOCK_SIZE, int MAX_BBOXES_PER_BLOCK>
__global__
void encode(const int N_img, const float4 *bbox_in, const long *labels_in, const int *offsets,
const int M, const float4 *dboxes, // const float *ious,
const float criteria, uint8_t *workspace, float4 *bbox_out, long *label_out) {
// Each block will take a single image's IoU set
const int img = blockIdx.x;
// shared memory for intermediate results
__shared__ volatile float best_bbox_iou_tmp[BLOCK_SIZE];
__shared__ volatile int best_bbox_idx_tmp[BLOCK_SIZE];
// shared memory for final best_bbox_{iou, idx} values
__shared__ volatile float best_bbox_iou[MAX_BBOXES_PER_BLOCK];
__shared__ volatile int best_bbox_idx[MAX_BBOXES_PER_BLOCK];
// index into the global workspace - each image needs (float + int) * M values
volatile float *best_dbox_iou = (float *)&workspace[img * M * 8];
volatile int *best_dbox_idx = (int *)&workspace[img * M * 8 + M * 4];
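// Workspace layout: each image owns M * 8 bytes: M floats holding the best IoU seen so
// far for every default box, followed by M ints holding the index of the input bbox that
// produced it. The buffer is zero-filled on the host, so the running IoU maxima start at 0.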
// number of input bboxes for this image
const int N_rows = offsets[img+1] - offsets[img];
// Check for potential crash
assert(N_rows <= MAX_BBOXES_PER_BLOCK);
#ifdef DEBUG
if (threadIdx.x == 0)
printf("N rows: %d %d to %d (%p - %p)\n", N_rows, offsets[img], offsets[img+1], best_dbox_iou, best_dbox_idx);
#endif
for (int i = threadIdx.x; i < MAX_BBOXES_PER_BLOCK; i += blockDim.x) {
best_bbox_iou[i] = -FLT_MAX;
best_bbox_idx[i] = -1;
}
__syncthreads();
// loop serially over the rows of the IoU set that correspond to this image
int row_num = 0;
for (int i = offsets[img]; i < offsets[img+1]; ++i) {
// reset shmem tallies
best_bbox_iou_tmp[threadIdx.x] = -FLT_MAX;
best_bbox_idx_tmp[threadIdx.x] = -1;
// index into the input buffer
// const float *row = &ious[i * M];
const float4 input_bbox = bbox_in[i];
#ifdef DEBUG
if (threadIdx.x == 0)
printf("%d - %p\n", img, &input_bbox);
#endif
// loop by threads over the columns
for (int j = threadIdx.x; j < M; j += blockDim.x) {
// check and store new max if necessary
const float4 input_dbox = dboxes[j];
// float new_val = row[j];
float new_val = calc_single_iou(input_bbox, input_dbox);
// handle per-row max in shared memory
if (new_val > best_bbox_iou_tmp[threadIdx.x]) {
best_bbox_iou_tmp[threadIdx.x] = new_val;
best_bbox_idx_tmp[threadIdx.x] = j;
}
// handle per-col max in global workspace
if (new_val > best_dbox_iou[j]) {
best_dbox_iou[j] = new_val;
best_dbox_idx[j] = row_num;
#ifdef DEBUG
assert(best_dbox_idx[j] >= 0);
assert(best_dbox_idx[j] < N_rows);
#endif
}
}
// Now we have all the values for this row -- reduce
__syncthreads();
// reduce - output is in max_{val, idx}_row[0]
reduce_val_idx(blockDim.x, best_bbox_iou_tmp, best_bbox_idx_tmp);
#ifdef DEBUG
__syncthreads();
#endif
// store output for row i
if (threadIdx.x == 0) {
best_bbox_iou[row_num] = best_bbox_iou_tmp[0];
best_bbox_idx[row_num] = best_bbox_idx_tmp[0];
#ifdef DEBUG
assert(best_bbox_idx[row_num] >= 0);
assert(best_bbox_idx[row_num] < M);
#endif
}
__syncthreads();
// keep track of _local_ row
row_num++;
}
#ifdef DEBUG
if (threadIdx.x == 0) {
for (int i = 0; i < N_rows; ++i) {
printf("%d - row : %d : best bbox_idx: %d\n", img, i, best_bbox_idx[i]);
}
}
#endif
#ifdef DEBUG
// make sure all best_bbox_{iou, val} are seen by everyone
__syncthreads();
#endif
// At this point we have the maximum values & indices for both bbox and dbox
/*
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
*/
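// Force-match every ground-truth box to its best default box: an IoU of 2.0 can never be
// beaten (and always passes the criteria filter), and that default box's index is pointed
// back at ground-truth row i, mirroring the index_fill_/idx assignment in the Python reference.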
for (int i = threadIdx.x; i < N_rows; i += blockDim.x) {
int idx = best_bbox_idx[i];
#ifdef DEBUG
assert(idx < M);
assert(idx >= 0);
#endif
best_dbox_iou[idx] = 2.;
best_dbox_idx[idx] = i;
#ifdef DEBUG
printf("%d - set best dbox_idx[%d] to %d\n", img, best_bbox_idx[i], i);
#endif
}
/**
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
__syncthreads();
for (int i = threadIdx.x; i < M; i += blockDim.x) {
// offset into output arrays: M values per image
// int output_idx = offsets[img] * M + i;
int output_idx = img * M + i;
// reset output labels to background
// NOTE: bbox_out is already cloned from dbox outside of this kernel
label_out[output_idx] = 0;
// Filter IoU > 0.5
bool mask = best_dbox_iou[i] > criteria;
float4 bbox = bbox_out[output_idx];
// copy some labels and bboxes
if (mask) {
// copy label
#ifdef DEBUG
printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + best_dbox_idx[i]]);
// printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + i]);
#endif
label_out[output_idx] = labels_in[offsets[img] + best_dbox_idx[i]];
// grab original box
bbox = bbox_in[offsets[img] + best_dbox_idx[i]];
#ifdef DEBUG
printf("mask %d : %d : %f %f %f %f\n", i, best_dbox_idx[i], bbox.x, bbox.y, bbox.z, bbox.w);
#endif
}
// transfer to xywh
float4 bbox_tmp;
bbox_tmp.x = 0.5 * (bbox.x + bbox.z);
bbox_tmp.y = 0.5 * (bbox.y + bbox.w);
bbox_tmp.z = bbox.z - bbox.x;
bbox_tmp.w = bbox.w - bbox.y;
// write out
bbox_out[output_idx] = bbox_tmp;
}
}
/**
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
std::vector<at::Tensor> box_encoder(const int N_img,
const at::Tensor& bbox_input,
const at::Tensor& bbox_offsets,
const at::Tensor& labels_input,
const at::Tensor& dbox,
float criteria) {
// Check everything is on the device
AT_ASSERTM(bbox_input.type().is_cuda(), "bboxes must be a CUDA tensor");
AT_ASSERTM(bbox_offsets.type().is_cuda(), "bbox offsets must be a CUDA tensor");
AT_ASSERTM(labels_input.type().is_cuda(), "labels must be a CUDA tensor");
AT_ASSERTM(dbox.type().is_cuda(), "dboxes must be a CUDA tensor");
// Check at least offsets, bboxes and labels are consistent
// Note: offsets is N+1 vs. N for labels
AT_ASSERTM(N_img + 1 == bbox_offsets.numel(), "must have N_img+1 offsets");
auto num_bbox_total = bbox_offsets[bbox_offsets.numel()-1].item<int>();
#ifdef DEBUG
printf("%d : bboxes: %d\n", (int)bbox_offsets.numel(), num_bbox_total);
#endif
AT_ASSERTM(num_bbox_total <= 2048, "total num bboxes must be <= 2048");
AT_ASSERTM(bbox_input.size(0) == labels_input.size(0), "bbox and labels must have same leading dimension");
const int N = bbox_input.size(0);
const int M = dbox.size(0);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// allocate final outputs (known size)
#ifdef DEBUG
printf("%d x %d\n", N_img * M, 4);
// at::Tensor bbox_out = dbox.type().tensor({N_img * M, 4});
printf("allocating %lu bytes for output labels\n", N_img*M*sizeof(long));
#endif
at::Tensor labels_out = at::empty({N_img * M}, labels_input.options());
THCudaCheck(hipGetLastError());
// copy default boxes to outputs
#ifdef DEBUG
printf("allocating %lu bytes for output bboxes\n", N_img*M*4*sizeof(float));
#endif
at::Tensor bbox_out = dbox.repeat({N_img, 1});
THCudaCheck(hipGetLastError());
// need to allocate some workspace
#ifdef DEBUG
printf("allocating %lu bytes for workspace\n", 8*M*N_img);
#endif
// at::Tensor workspace = at::CUDA(at::kByte).zeros({8 * M * N_img});
at::Tensor workspace = at::zeros({8 * M * N_img}, at::CUDA(at::kByte));
THCudaCheck(hipGetLastError());
// Encode the inputs
const int THREADS_PER_BLOCK = 256;
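// One block per image; the second template argument caps the number of input bboxes
// handled per image at 256 (asserted inside the kernel), while the host-side check above
// bounds the total across the batch.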
hipLaunchKernelGGL(( encode<THREADS_PER_BLOCK, 256>), dim3(N_img), dim3(THREADS_PER_BLOCK), 0, stream.stream(), N_img,
(float4*)bbox_input.data<float>(),
labels_input.data<long>(),
bbox_offsets.data<int>(),
M,
(float4*)dbox.data<float>(),
criteria,
workspace.data<uint8_t>(),
(float4*)bbox_out.data<float>(),
labels_out.data<long>());
THCudaCheck(hipGetLastError());
return {bbox_out, labels_out};
}
at::Tensor calc_ious(const int N_img,
const at::Tensor& boxes1,
const at::Tensor& boxes1_offsets,
const at::Tensor& boxes2) {
const int N = boxes1.size(0);
const int M = boxes2.size(0);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// at::Tensor ious = at::CUDA(at::kFloat).zeros({N, M});
// at::Tensor ious = at::ones(at::CUDA(at::kFloat), {N, M});
at::Tensor ious = at::empty({N, M}, boxes1.options());
// Get IoU of all source x default box pairs
hipLaunchKernelGGL(( calc_ious_kernel), dim3(N_img), dim3(256), 0, stream.stream(),
N_img,
(float4*)boxes1.data<float>(),
boxes1_offsets.data<int>(),
M,
(float4*)boxes2.data<float>(),
ious.data<float>());
THCudaCheck(hipGetLastError());
return ious;
}
| 0c05fb1b2fda94512740af8c18293a95bb185c93.cu | /******************************************************************************
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCNumerics.cuh>
#include <THC/THC.h>
#include <cuda.h>
//#define DEBUG
// calculate the IoU of a single box against another box
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// (lt), (rb)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
__global__
// boxes1 : [N x 4]
// boxes2 : [M x 4]
// ious : [N x M]
void calc_ious_kernel(const int N_img, const float4 *box1, const int *box1_offsets,
const int M, const float4 *boxes2, float *ious) {
// launch N_img blocks
const int img = blockIdx.x;
// each block, i will run over the box1_N[i] source and M target boxes
// generating box1_N[i] x M outputs
// alias to start of boxes for this image
const float4 *b1 = &box1[box1_offsets[img]];
if (threadIdx.x == 0) {
//printf("offset for img %d : %d\n", img, box1_offsets[img]);
}
// number of boxes for this image from offsets
int N = box1_offsets[img+1] - box1_offsets[img];
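// Boxes for the whole batch are packed back-to-back; box1_offsets delimits this image's
// slice, and the output rows are written at the matching offset so the N x M IoU matrix
// keeps the same packing.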
for (int i = 0; i < N; ++i) {
// if (threadIdx.x == 0) printf("i : %d\n", i);
const float4 source = b1[i];
// for each source, loop over targets
for (int j = threadIdx.x; j < M; j += blockDim.x) {
const float4 target = boxes2[j];
float iou = calc_single_iou(source, target);
// store the calculated IoU in the correct spot
int out_idx = box1_offsets[img] * M + i * M + j;
ious[out_idx] = iou;
}
}
}
__device__
void reduce_val_idx(int N, volatile float *vals, volatile int *idx) {
// naive: single thread for now
if (threadIdx.x == 0) {
float max_val = vals[0];
int max_idx = idx[0];
for (int i = 1; i < N; ++i) {
if (vals[i] > max_val) {
max_val = vals[i];
max_idx = idx[i];
}
}
vals[0] = max_val;
idx[0] = max_idx;
}
}
/**
* perform remaining parts, storing temporary values in global workspace
* workspace needs N_img * M values, each of 8 bytes (float, int)
**/
template <int BLOCK_SIZE, int MAX_BBOXES_PER_BLOCK>
__global__
void encode(const int N_img, const float4 *bbox_in, const long *labels_in, const int *offsets,
const int M, const float4 *dboxes, // const float *ious,
const float criteria, uint8_t *workspace, float4 *bbox_out, long *label_out) {
// Each block will take a single image's IoU set
const int img = blockIdx.x;
// shared memory for intermediate results
__shared__ volatile float best_bbox_iou_tmp[BLOCK_SIZE];
__shared__ volatile int best_bbox_idx_tmp[BLOCK_SIZE];
// shared memory for final best_bbox_{iou, idx} values
__shared__ volatile float best_bbox_iou[MAX_BBOXES_PER_BLOCK];
__shared__ volatile int best_bbox_idx[MAX_BBOXES_PER_BLOCK];
// index into the global workspace - each image needs (float + int) * M values
volatile float *best_dbox_iou = (float *)&workspace[img * M * 8];
volatile int *best_dbox_idx = (int *)&workspace[img * M * 8 + M * 4];
// number of input bboxes for this image
const int N_rows = offsets[img+1] - offsets[img];
// Check for potential crash
assert(N_rows <= MAX_BBOXES_PER_BLOCK);
#ifdef DEBUG
if (threadIdx.x == 0)
printf("N rows: %d %d to %d (%p - %p)\n", N_rows, offsets[img], offsets[img+1], best_dbox_iou, best_dbox_idx);
#endif
for (int i = threadIdx.x; i < MAX_BBOXES_PER_BLOCK; i += blockDim.x) {
best_bbox_iou[i] = -FLT_MAX;
best_bbox_idx[i] = -1;
}
__syncthreads();
// loop serially over the rows of the IoU set that correspond to this image
int row_num = 0;
for (int i = offsets[img]; i < offsets[img+1]; ++i) {
// reset shmem tallies
best_bbox_iou_tmp[threadIdx.x] = -FLT_MAX;
best_bbox_idx_tmp[threadIdx.x] = -1;
// index into the input buffer
// const float *row = &ious[i * M];
const float4 input_bbox = bbox_in[i];
#ifdef DEBUG
if (threadIdx.x == 0)
printf("%d - %p\n", img, &input_bbox);
#endif
// loop by threads over the columns
for (int j = threadIdx.x; j < M; j += blockDim.x) {
// check and store new max if necessary
const float4 input_dbox = dboxes[j];
// float new_val = row[j];
float new_val = calc_single_iou(input_bbox, input_dbox);
// handle per-row max in shared memory
if (new_val > best_bbox_iou_tmp[threadIdx.x]) {
best_bbox_iou_tmp[threadIdx.x] = new_val;
best_bbox_idx_tmp[threadIdx.x] = j;
}
// handle per-col max in global workspace
if (new_val > best_dbox_iou[j]) {
best_dbox_iou[j] = new_val;
best_dbox_idx[j] = row_num;
#ifdef DEBUG
assert(best_dbox_idx[j] >= 0);
assert(best_dbox_idx[j] < N_rows);
#endif
}
}
// Now we have all the values for this row -- reduce
__syncthreads();
// reduce - output is in max_{val, idx}_row[0]
reduce_val_idx(blockDim.x, best_bbox_iou_tmp, best_bbox_idx_tmp);
#ifdef DEBUG
__syncthreads();
#endif
// store output for row i
if (threadIdx.x == 0) {
best_bbox_iou[row_num] = best_bbox_iou_tmp[0];
best_bbox_idx[row_num] = best_bbox_idx_tmp[0];
#ifdef DEBUG
assert(best_bbox_idx[row_num] >= 0);
assert(best_bbox_idx[row_num] < M);
#endif
}
__syncthreads();
// keep track of _local_ row
row_num++;
}
#ifdef DEBUG
if (threadIdx.x == 0) {
for (int i = 0; i < N_rows; ++i) {
printf("%d - row : %d : best bbox_idx: %d\n", img, i, best_bbox_idx[i]);
}
}
#endif
#ifdef DEBUG
// make sure all best_bbox_{iou, val} are seen by everyone
__syncthreads();
#endif
// At this point we have the maximum values & indices for both bbox and dbox
/*
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
*/
for (int i = threadIdx.x; i < N_rows; i += blockDim.x) {
int idx = best_bbox_idx[i];
#ifdef DEBUG
assert(idx < M);
assert(idx >= 0);
#endif
best_dbox_iou[idx] = 2.;
best_dbox_idx[idx] = i;
#ifdef DEBUG
printf("%d - set best dbox_idx[%d] to %d\n", img, best_bbox_idx[i], i);
#endif
}
/**
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
__syncthreads();
for (int i = threadIdx.x; i < M; i += blockDim.x) {
// offset into output arrays: M values per image
// int output_idx = offsets[img] * M + i;
int output_idx = img * M + i;
// reset output labels to background
// NOTE: bbox_out is already cloned from dbox outside of this kernel
label_out[output_idx] = 0;
// Filter IoU > 0.5
bool mask = best_dbox_iou[i] > criteria;
float4 bbox = bbox_out[output_idx];
// copy some labels and bboxes
if (mask) {
// copy label
#ifdef DEBUG
printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + best_dbox_idx[i]]);
// printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + i]);
#endif
label_out[output_idx] = labels_in[offsets[img] + best_dbox_idx[i]];
// grab original box
bbox = bbox_in[offsets[img] + best_dbox_idx[i]];
#ifdef DEBUG
printf("mask %d : %d : %f %f %f %f\n", i, best_dbox_idx[i], bbox.x, bbox.y, bbox.z, bbox.w);
#endif
}
// transfer to xywh
float4 bbox_tmp;
bbox_tmp.x = 0.5 * (bbox.x + bbox.z);
bbox_tmp.y = 0.5 * (bbox.y + bbox.w);
bbox_tmp.z = bbox.z - bbox.x;
bbox_tmp.w = bbox.w - bbox.y;
// write out
bbox_out[output_idx] = bbox_tmp;
}
}
/**
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
std::vector<at::Tensor> box_encoder(const int N_img,
const at::Tensor& bbox_input,
const at::Tensor& bbox_offsets,
const at::Tensor& labels_input,
const at::Tensor& dbox,
float criteria) {
// Check everything is on the device
AT_ASSERTM(bbox_input.type().is_cuda(), "bboxes must be a CUDA tensor");
AT_ASSERTM(bbox_offsets.type().is_cuda(), "bbox offsets must be a CUDA tensor");
AT_ASSERTM(labels_input.type().is_cuda(), "labels must be a CUDA tensor");
AT_ASSERTM(dbox.type().is_cuda(), "dboxes must be a CUDA tensor");
// Check at least offsets, bboxes and labels are consistent
// Note: offsets is N+1 vs. N for labels
AT_ASSERTM(N_img + 1 == bbox_offsets.numel(), "must have N_img+1 offsets");
auto num_bbox_total = bbox_offsets[bbox_offsets.numel()-1].item<int>();
#ifdef DEBUG
printf("%d : bboxes: %d\n", (int)bbox_offsets.numel(), num_bbox_total);
#endif
AT_ASSERTM(num_bbox_total <= 2048, "total num bboxes must be <= 2048");
AT_ASSERTM(bbox_input.size(0) == labels_input.size(0), "bbox and labels must have same leading dimension");
const int N = bbox_input.size(0);
const int M = dbox.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// allocate final outputs (known size)
#ifdef DEBUG
printf("%d x %d\n", N_img * M, 4);
// at::Tensor bbox_out = dbox.type().tensor({N_img * M, 4});
printf("allocating %lu bytes for output labels\n", N_img*M*sizeof(long));
#endif
at::Tensor labels_out = at::empty({N_img * M}, labels_input.options());
THCudaCheck(cudaGetLastError());
// copy default boxes to outputs
#ifdef DEBUG
printf("allocating %lu bytes for output bboxes\n", N_img*M*4*sizeof(float));
#endif
at::Tensor bbox_out = dbox.repeat({N_img, 1});
THCudaCheck(cudaGetLastError());
// need to allocate some workspace
#ifdef DEBUG
printf("allocating %lu bytes for workspace\n", 8*M*N_img);
#endif
// at::Tensor workspace = at::CUDA(at::kByte).zeros({8 * M * N_img});
at::Tensor workspace = at::zeros({8 * M * N_img}, at::CUDA(at::kByte));
THCudaCheck(cudaGetLastError());
// Encode the inputs
const int THREADS_PER_BLOCK = 256;
encode<THREADS_PER_BLOCK, 256><<<N_img, THREADS_PER_BLOCK, 0, stream.stream()>>>(N_img,
(float4*)bbox_input.data<float>(),
labels_input.data<long>(),
bbox_offsets.data<int>(),
M,
(float4*)dbox.data<float>(),
criteria,
workspace.data<uint8_t>(),
(float4*)bbox_out.data<float>(),
labels_out.data<long>());
THCudaCheck(cudaGetLastError());
return {bbox_out, labels_out};
}
at::Tensor calc_ious(const int N_img,
const at::Tensor& boxes1,
const at::Tensor& boxes1_offsets,
const at::Tensor& boxes2) {
const int N = boxes1.size(0);
const int M = boxes2.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// at::Tensor ious = at::CUDA(at::kFloat).zeros({N, M});
// at::Tensor ious = at::ones(at::CUDA(at::kFloat), {N, M});
at::Tensor ious = at::empty({N, M}, boxes1.options());
// Get IoU of all source x default box pairs
calc_ious_kernel<<<N_img, 256, 0, stream.stream()>>>(
N_img,
(float4*)boxes1.data<float>(),
boxes1_offsets.data<int>(),
M,
(float4*)boxes2.data<float>(),
ious.data<float>());
THCudaCheck(cudaGetLastError());
return ious;
}
|
71db709cfe729e1c9a3dc232cdf938cd02a0d90f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cudaConvolution_3D_Float_Valid_Kernel.h>
#include <cudaGradient_3D_Float_Valid_Kernel.h>
void cudaGradient_3D_Float_Valid(float* d_in, int dimx, int dimy, int dimz, float* d_LPMask, int LPMaskSize, float *d_HPMask, int HPMaskSize,
float *d_tmp, float *d_tmp2, float* d_gx, float *d_gy, float *d_gz)
{
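// Separable valid-mode gradient: each component smooths with the low-pass mask along the
// two orthogonal axes and differentiates with the high-pass mask along its own axis.
// d_tmp / d_tmp2 hold intermediate volumes (the Z-smoothed volume in d_tmp is reused for
// both the x- and y-gradient paths); every filtered axis shrinks by (mask size - 1).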
int dx = dimx - LPMaskSize + 1, dy = dimy - LPMaskSize + 1, dz = dimz - LPMaskSize + 1;
cudaConvolution_3D_Float_Valid_Z(d_in, dimx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp);
cudaConvolution_3D_Float_Valid_Y(d_tmp, dimx, dimy, dz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_X(d_tmp2, dimx, dy, dz, d_HPMask, HPMaskSize, d_gx);
cudaConvolution_3D_Float_Valid_X(d_tmp, dimx, dimy, dz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_Y(d_tmp2, dx, dimy, dz, d_HPMask, HPMaskSize, d_gy);
cudaConvolution_3D_Float_Valid_X(d_in, dimx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp);
cudaConvolution_3D_Float_Valid_Y(d_tmp, dx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_Z(d_tmp2, dx, dy, dimz, d_HPMask, HPMaskSize, d_gz);
} | 71db709cfe729e1c9a3dc232cdf938cd02a0d90f.cu |
#include <cuda_runtime.h>
#include <cudaConvolution_3D_Float_Valid_Kernel.h>
#include <cudaGradient_3D_Float_Valid_Kernel.h>
void cudaGradient_3D_Float_Valid(float* d_in, int dimx, int dimy, int dimz, float* d_LPMask, int LPMaskSize, float *d_HPMask, int HPMaskSize,
float *d_tmp, float *d_tmp2, float* d_gx, float *d_gy, float *d_gz)
{
int dx = dimx - LPMaskSize + 1, dy = dimy - LPMaskSize + 1, dz = dimz - LPMaskSize + 1;
cudaConvolution_3D_Float_Valid_Z(d_in, dimx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp);
cudaConvolution_3D_Float_Valid_Y(d_tmp, dimx, dimy, dz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_X(d_tmp2, dimx, dy, dz, d_HPMask, HPMaskSize, d_gx);
cudaConvolution_3D_Float_Valid_X(d_tmp, dimx, dimy, dz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_Y(d_tmp2, dx, dimy, dz, d_HPMask, HPMaskSize, d_gy);
cudaConvolution_3D_Float_Valid_X(d_in, dimx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp);
cudaConvolution_3D_Float_Valid_Y(d_tmp, dx, dimy, dimz, d_LPMask, LPMaskSize, d_tmp2);
cudaConvolution_3D_Float_Valid_Z(d_tmp2, dx, dy, dimz, d_HPMask, HPMaskSize, d_gz);
} |
3b9320f1c639006d9f513595a791b0f703332bf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/sparse/SparseUtils.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHNumerics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/system/hip/execution_policy.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
#ifndef __HIP_PLATFORM_HCC__
namespace {
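// Converts the row indices of an nnz-long COO index tensor into a CSR row-pointer array
// of length dim+1. Indices are narrowed to int32 first because the cusparse/hipsparse
// coo2csr routine wrapped by Xcoo2csr works on 32-bit indices.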
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
#endif
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse_, r_, t, dense}));
// TODO: This error message seems awfully opaque
AT_CHECK(sparse_._sparseDims() == 2, "addmm: matrices expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0, "addmm: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2, "addmm: matrices expected, got ", dense.dim(), "D tensor");
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.type().toScalarType(kInt));
colIndicesInt.copy_(colIndices);
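// The cusparse/hipsparse csrmm2 call below works on 32-bit index arrays, hence the CSR
// row pointer built above and the int32 copy of the column indices here.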
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == ScalarConvert<int, scalar_t>::to(1)) {
if (!isSameTensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, beta);
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
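// csrmm2 produces a column-major dense result: if r_ is already column-major it is used
// directly, otherwise the product is computed into the transposed clone r__ and copied
// back into r_ after the dispatch.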
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
});
r_.copy_(r__);
return r_;
#else
AT_ERROR("s_addmm_out_sparse_dense_cuda: HIP not supported");
#endif
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = t.type().tensor();
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, sparse_, dense}));
AT_CHECK(sparse_._sparseDims() == 2,
"hspmm: Argument #2: matrices expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: matrices expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
_get_sparse_impl(r_)->raw_resize_(1, 1, {m, n});
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.type());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
_get_sparse_impl(newSparse)->_sizes_mut()[0] = nnz; // TODO: use something safer
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
_get_sparse_impl(r_)->set_indices_and_values(indices, values);
return r_;
#else
AT_ERROR("hspmm_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = sparse.type().tensor();
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!isSameTensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse._sparseDims();
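// Two paths: a coalesced sparse tensor is added with a dedicated elementwise kernel that
// writes each nonzero straight into the dense output; otherwise the indices are flattened
// and the (scaled) values are accumulated with index_add_, which tolerates duplicates.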
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = globalContext().getCurrentCUDAStreamOnDevice(curDevice);
if (sparse._denseDims() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = _newFlattenedIndices(sparse, 0);
indices1D.resize_({nnz});
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != ScalarConvert<int, scalar_t>::to(1)) {
values = values.mul(value);
}
});
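// The flattened 1-D indices address rows of a 2-D view of the dense output:
// (prod of sparse dims) x (prod of dense dims); index_add_ then scatters the value rows,
// accumulating any duplicate indices.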
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values.resize_({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
return r_;
#else
AT_ERROR("add_out_dense_sparse_cuda: HIP not supported");
#endif
}
Tensor add_dense_sparse_cuda(const Tensor& t, SparseTensorRef src, Scalar alpha) {
Tensor r = t.type().tensor();
add_out_dense_sparse_cuda(r, t, src, alpha);
return r;
}
Tensor& add_dense_sparse_cuda_(Tensor& t, SparseTensorRef src, Scalar alpha) {
return add_out_dense_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return raw_copy_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(_is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t._sparseDims(), " sparse dimensions while 'other' has ", src._sparseDims(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND_HALF(
s_values_.type(), "s_add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != ScalarConvert<int, scalar_t>::to(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
_alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
#else
AT_ERROR("s_add_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_add_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_add_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_add_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_add_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// sub(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_sub_out_sparse_cuda(SparseTensor& r, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "sub: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r.is_cuda(), "sub: expected 'out' to be CUDA, but got CPU");
AT_DISPATCH_ALL_TYPES(
t.type(), "sub_sparse", [&] {
scalar_t cast_value = value.to<scalar_t>();
s_add_out_sparse_cuda(r, t, src, ScalarNegate<scalar_t>::to(cast_value));
}
);
return r;
}
SparseTensor s_sub_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_sub_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_sub_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_sub_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparseDims = src._sparseDims();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = t_indices_.type().tensor({sparseDims, max_nnz});
Tensor r_values_ = _new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
_get_sparse_impl(r_)->set_indices_and_values(r_indices_, r_values_); // TODO: sigh
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = globalContext().getCurrentCUDAStreamOnDevice(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND_HALF(
t_values_.type(), "s_mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
_get_sparse_impl(r_)->set_nnz(cpu_resultNnz.accessor<int64_t, 1>()[0]);
_get_sparse_impl(r_)->set_coalesced(true);
return r_;
#else
AT_ERROR("s_mul_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_mul_sparse_cuda(const SparseTensor& t, const SparseTensor& src) {
SparseTensor r = t.type().tensor();
s_mul_out_sparse_cuda(r, t, src);
return r;
}
SparseTensor& s_mul_sparse_cuda_(SparseTensor& t, const SparseTensor& src) {
return s_mul_out_sparse_cuda(t, t, src);
}
}} // namespace at::native
| 3b9320f1c639006d9f513595a791b0f703332bf7.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/sparse/SparseUtils.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCNumerics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/system/cuda/execution_policy.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
#ifndef __HIP_PLATFORM_HCC__
namespace {
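// Builds a CSR row-pointer array of length dim + 1 from the (sorted) COO row
// indices; csrmm2 below consumes the sparse operand in this CSR layout.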
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data<int32_t>(), nnz, dim, csr.data<int32_t>());
return csr;
}
}
#endif
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensorRef, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse_, r_, t, dense}));
// TODO: This error message seems awfully opaque
AT_CHECK(sparse_._sparseDims() == 2, "addmm: matrices expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0, "addmm: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2, "addmm: matrices expected, got ", dense.dim(), "D tensor");
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
AT_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
AT_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.type().toScalarType(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == ScalarConvert<int, scalar_t>::to(1)) {
if (!isSameTensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, beta);
}
/* r_ */
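// csrmm2 needs a column-major result buffer: reuse r_ if it is already
// column-major, otherwise build a column-major copy and copy back afterwards.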
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone();
r__.transpose_(0, 1);
}
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data<scalar_t>(),
csr.data<int32_t>(),
colIndicesInt.data<int32_t>(),
dense_.data<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data<scalar_t>(),
r__.stride(1));
});
r_.copy_(r__);
return r_;
#else
AT_ERROR("s_addmm_out_sparse_dense_cuda: HIP not supported");
#endif
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = t.type().tensor();
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(sparse_.is_cuda()); // dispatch argument
AT_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
AT_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, sparse_, dense}));
AT_CHECK(sparse_._sparseDims() == 2,
"hspmm: Argument #2: matrices expected, got ", sparse_._sparseDims(), "D tensor");
AT_CHECK(sparse_._denseDims() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_._denseDims(), "D values");
AT_CHECK(dense.dim() == 2,
"hspmm: Argument #3: matrices expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
AT_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
_get_sparse_impl(r_)->raw_resize_(1, 1, {m, n});
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.type());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
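// (Likely because dstIndices below is overwritten in-place with 0..nnz-1 via
// thrust::sequence; cloning keeps the coalesced input's indices intact.)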
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
_get_sparse_impl(newSparse)->_sizes_mut()[0] = nnz; // TODO: use something safer
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
_get_sparse_impl(r_)->set_indices_and_values(indices, values);
return r_;
#else
AT_ERROR("hspmm_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = sparse.type().tensor();
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensorRef, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, SparseTensorRef sparse_, at::Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
const SparseTensor& sparse = sparse_.tref;
AT_ASSERT(dense.is_cuda()); // dispatch argument
AT_CHECK(sparse.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({sparse, r_, dense}));
AT_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!isSameTensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
AT_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse._sparseDims();
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = globalContext().getCurrentCUDAStreamOnDevice(curDevice);
if (sparse._denseDims() == 0) {
AT_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
AT_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = _newFlattenedIndices(sparse, 0);
indices1D.resize_({nnz});
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != ScalarConvert<int, scalar_t>::to(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values.resize_({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
return r_;
#else
AT_ERROR("add_out_dense_sparse_cuda: HIP not supported");
#endif
}
Tensor add_dense_sparse_cuda(const Tensor& t, SparseTensorRef src, Scalar alpha) {
Tensor r = t.type().tensor();
add_out_dense_sparse_cuda(r, t, src, alpha);
return r;
}
Tensor& add_dense_sparse_cuda_(Tensor& t, SparseTensorRef src, Scalar alpha) {
return add_out_dense_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t, src}));
AT_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return raw_copy_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
AT_CHECK(_is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t._sparseDims(), " sparse dimensions while 'other' has ", src._sparseDims(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND_HALF(
s_values_.type(), "s_add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != ScalarConvert<int, scalar_t>::to(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
_alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
#else
AT_ERROR("s_add_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_add_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_add_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_add_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_add_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// sub(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_sub_out_sparse_cuda(SparseTensor& r, const SparseTensor& t, const SparseTensor& src, Scalar value) {
AT_ASSERT(t.is_cuda()); // dispatch argument
AT_CHECK(src.is_cuda(), "sub: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r.is_cuda(), "sub: expected 'out' to be CUDA, but got CPU");
AT_DISPATCH_ALL_TYPES(
t.type(), "sub_sparse", [&] {
scalar_t cast_value = value.to<scalar_t>();
s_add_out_sparse_cuda(r, t, src, ScalarNegate<scalar_t>::to(cast_value));
}
);
return r;
}
SparseTensor s_sub_sparse_cuda(const SparseTensor& t, const SparseTensor& src, Scalar alpha) {
SparseTensor r = t.type().tensor();
s_sub_out_sparse_cuda(r, t, src, alpha);
return r;
}
SparseTensor& s_sub_sparse_cuda_(SparseTensor& t, const SparseTensor& src, Scalar alpha) {
return s_sub_out_sparse_cuda(t, t, src, alpha);
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& s_mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
#ifndef __HIP_PLATFORM_HCC__
AT_ASSERT(t_.is_cuda()); // dispatch argument
AT_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
AT_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
AT_CHECK(_check_device({r_, t_, src_}));
AT_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparseDims = src._sparseDims();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = t_indices_.type().tensor({sparseDims, max_nnz});
Tensor r_values_ = _new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
_get_sparse_impl(r_)->set_indices_and_values(r_indices_, r_values_); // TODO: sigh
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = globalContext().getCurrentCUDAStreamOnDevice(curDevice);
AT_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND_HALF(
t_values_.type(), "s_mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
_get_sparse_impl(r_)->set_nnz(cpu_resultNnz.accessor<int64_t, 1>()[0]);
_get_sparse_impl(r_)->set_coalesced(true);
return r_;
#else
AT_ERROR("s_mul_out_sparse_cuda: HIP not supported");
#endif
}
SparseTensor s_mul_sparse_cuda(const SparseTensor& t, const SparseTensor& src) {
SparseTensor r = t.type().tensor();
s_mul_out_sparse_cuda(r, t, src);
return r;
}
SparseTensor& s_mul_sparse_cuda_(SparseTensor& t, const SparseTensor& src) {
return s_mul_out_sparse_cuda(t, t, src);
}
}} // namespace at::native
|
ab0b6e6ded3d694ad70f6fde640b902a61459792.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Counter-based block synchronization. Only meant to be used for
// debugging and validating synchronization. This should be replaced
// with cuda::barrier::arrive_and_wait as that should be more robust.
namespace block_sync {
using CounterType = unsigned int;
static constexpr CounterType COUNTER_TYPE_MAX = ~(CounterType)0;
__shared__ CounterType sync_counter;
__device__ void init() {
const unsigned int tid = threadIdx.x + threadIdx.y * blockDim.x +
threadIdx.z * blockDim.x * blockDim.y;
if (tid == 0) {
sync_counter = 0;
}
__syncthreads();
}
// Emulate __syncthreads() with a synchronization counter
__device__ void sync() {
unsigned int backoff = 8;
const unsigned int backoff_max = 256;
const unsigned int num_threads = blockDim.x * blockDim.y * blockDim.z;
__threadfence_block();
// Use counter range only up to a limit so that the next val won't
// overflow.
const auto counter_max = (COUNTER_TYPE_MAX / num_threads) * num_threads;
const auto old = atomicInc(&sync_counter, counter_max - 1);
const auto next = (old / num_threads) * num_threads + num_threads;
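// counter_max is a multiple of num_threads, so rounds of num_threads arrivals
// stay aligned and `next` (at most counter_max) cannot overflow CounterType.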
auto local_sync_counter = *(volatile CounterType*)(&sync_counter);
// sync_counter may wrap around, which means local_sync_counter
// becomes smaller than old. In that case, it's guaranteed that all
// threads have incremented the counter.
while (local_sync_counter < next && old < local_sync_counter) {
__nanosleep(backoff);
if (backoff < backoff_max) {
backoff *= 2;
}
local_sync_counter = *(volatile CounterType*)(&sync_counter);
}
}
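// Typical usage (sketch): have every thread call init() once at the top of the
// kernel, then call sync() wherever __syncthreads() would otherwise be used:
// block_sync::init();
// ... write shared memory ...
// block_sync::sync();
// ... read shared memory ...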
} // namespace block_sync
| ab0b6e6ded3d694ad70f6fde640b902a61459792.cu |
// Counter-based block synchronization. Only meant to be used for
// debugging and validating synchronization. This should be replaced
// with cuda::barrier::arrive_and_wait as that should be more robust.
namespace block_sync {
using CounterType = unsigned int;
static constexpr CounterType COUNTER_TYPE_MAX = ~(CounterType)0;
__shared__ CounterType sync_counter;
__device__ void init() {
const unsigned int tid = threadIdx.x + threadIdx.y * blockDim.x +
threadIdx.z * blockDim.x * blockDim.y;
if (tid == 0) {
sync_counter = 0;
}
__syncthreads();
}
// Emulate __syncthreads() with a synchronization counter
__device__ void sync() {
unsigned int backoff = 8;
const unsigned int backoff_max = 256;
const unsigned int num_threads = blockDim.x * blockDim.y * blockDim.z;
__threadfence_block();
// Use counter range only up to a limit so that the next val won't
// overflow.
const auto counter_max = (COUNTER_TYPE_MAX / num_threads) * num_threads;
const auto old = atomicInc(&sync_counter, counter_max - 1);
const auto next = (old / num_threads) * num_threads + num_threads;
auto local_sync_counter = *(volatile CounterType*)(&sync_counter);
// sync_counter may wrap around, which means local_sync_counter
// becomes smaller than old. In that case, it's guaranteed that all
// threads have incremented the counter.
while (local_sync_counter < next && old < local_sync_counter) {
__nanosleep(backoff);
if (backoff < backoff_max) {
backoff *= 2;
}
local_sync_counter = *(volatile CounterType*)(&sync_counter);
}
}
} // namespace block_sync
|
dbe382cf6673a844c0e45567cd75ee6e4c67f8eb.hip | // !!! This is a file automatically generated by hipify!!!
// Thread Divergence - Minimized - Workshop 8
// w8.2.cu
// ...
// calculate the dot product block by block
__global__ void dotProduct(const float* a, const float* b, float* c, int n) {
// upgrade your original kernel here
}
// Thread Divergence - Workshop 8
// w8.1.cu
#include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
const int ntpb = 1024; // number of threads per block
void init(float* a, int n) {
float f = 1.0f / RAND_MAX;
for (int i = 0; i < n; i++)
a[i] = std::rand() * f; // [0.0f 1.0f]
}
// calculate the dot product block by block
__global__ void dotProduct(const float* a, const float* b, float* c, int n) {
// include improved code
}
// accumulate the block sums
__global__ void accumulate(float* c, int n) {
// include improved code
}
int main(int argc, char** argv) {
// interpret command-line arguments
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vectors\n";
return 1;
}
int n = std::atoi(argv[1]);
int nblocks = (n + ntpb - 1) / ntpb;
if (nblocks > ntpb) {
nblocks = ntpb;
n = nblocks * ntpb;
}
// host vectors
float* h_a = new float[n];
float* h_b = new float[n];
init(h_a, n);
init(h_b, n);
// device vectors (d_a, d_b, d_c)
float* d_a;
float* d_b;
float* d_c;
hipMalloc((void**)&d_a, n * sizeof(float));
hipMalloc((void**)&d_b, n * sizeof(float));
hipMalloc((void**)&d_c, nblocks * sizeof(float));
// copy from host to device h_a -> d_a, h_b -> d_b
hipMemcpy(d_a, h_a, n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, n * sizeof(float), hipMemcpyHostToDevice);
// dot product on the device
hipLaunchKernelGGL(( dotProduct), dim3(nblocks), dim3(ntpb), 0, 0, d_a, d_b, d_c, n);
// synchronize
hipDeviceSynchronize();
// accumulate the block sums on the device
hipLaunchKernelGGL(( accumulate), dim3(1), dim3(nblocks), 0, 0, d_c, nblocks);
// copy from device to host d_c[0] -> h_d
float h_c;
hipMemcpy(&h_c, d_c, sizeof(float), hipMemcpyDeviceToHost);
float hx = 0.f;
for (int i = 0; i < n; i++)
hx += h_a[i] * h_b[i];
// compare results
std::cout << "Device = " << h_c << " Host = " << hx << std::endl;
// free device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// free host memory
delete [] h_a;
delete [] h_b;
// reset the device
hipDeviceReset();
}
// accumulate the block sums
__global__ void accumulate(float* c, int n) {
// upgrade your original kernel here
}
int main(int argc, char** argv) {
// same as above ...
} | dbe382cf6673a844c0e45567cd75ee6e4c67f8eb.cu | // Thread Divergence - Minimized - Workshop 8
// w8.2.cu
// ...
// calculate the dot product block by block
__global__ void dotProduct(const float* a, const float* b, float* c, int n) {
// upgrade your original kernel here
}
// Thread Divergence - Workshop 8
// w8.1.cu
#include <iostream>
#include <cstdlib>
#include <cuda_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
const int ntpb = 1024; // number of threads per block
void init(float* a, int n) {
float f = 1.0f / RAND_MAX;
for (int i = 0; i < n; i++)
a[i] = std::rand() * f; // [0.0f 1.0f]
}
// calculate the dot product block by block
__global__ void dotProduct(const float* a, const float* b, float* c, int n) {
// include improved code
}
// accumulate the block sums
__global__ void accumulate(float* c, int n) {
// include improved code
}
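// A minimal sketch of one possible way to fill in the two kernels above (not the
// required workshop solution); it assumes blockDim.x == ntpb is a power of two.
// The "*Sketch" names are illustrative placeholders, not part of the exercise.
__global__ void dotProductSketch(const float* a, const float* b, float* c, int n) {
    __shared__ float cache[ntpb];            // one partial product per thread
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    cache[threadIdx.x] = (gid < n) ? a[gid] * b[gid] : 0.0f;
    __syncthreads();
    // tree reduction with sequential addressing: whole warps drop out together,
    // so intra-warp divergence only appears once the stride falls below 32
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        c[blockIdx.x] = cache[0];            // one partial sum per block
}
__global__ void accumulateSketch(float* c, int n) {
    // n (== nblocks) need not be a power of two, so keep this one simple:
    // a single thread folds the per-block sums into c[0]
    if (threadIdx.x == 0) {
        float sum = 0.0f;
        for (int i = 0; i < n; i++)
            sum += c[i];
        c[0] = sum;
    }
}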
int main(int argc, char** argv) {
// interpret command-line arguments
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vectors\n";
return 1;
}
int n = std::atoi(argv[1]);
int nblocks = (n + ntpb - 1) / ntpb;
if (nblocks > ntpb) {
nblocks = ntpb;
n = nblocks * ntpb;
}
// host vectors
float* h_a = new float[n];
float* h_b = new float[n];
init(h_a, n);
init(h_b, n);
// device vectors (d_a, d_b, d_c)
float* d_a;
float* d_b;
float* d_c;
cudaMalloc((void**)&d_a, n * sizeof(float));
cudaMalloc((void**)&d_b, n * sizeof(float));
cudaMalloc((void**)&d_c, nblocks * sizeof(float));
// copy from host to device h_a -> d_a, h_b -> d_b
cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, n * sizeof(float), cudaMemcpyHostToDevice);
// dot product on the device
dotProduct<<<nblocks, ntpb>>>(d_a, d_b, d_c, n);
// synchronize
cudaDeviceSynchronize();
// accumulate the block sums on the device
accumulate<<< 1, nblocks>>>(d_c, nblocks);
// copy from device to host d_c[0] -> h_d
float h_c;
cudaMemcpy(&h_c, d_c, sizeof(float), cudaMemcpyDeviceToHost);
float hx = 0.f;
for (int i = 0; i < n; i++)
hx += h_a[i] * h_b[i];
// compare results
std::cout << "Device = " << h_c << " Host = " << hx << std::endl;
// free device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// free host memory
delete [] h_a;
delete [] h_b;
// reset the device
cudaDeviceReset();
}
// accumulate the block sums
__global__ void accumulate(float* c, int n) {
// upgrade your original kernel here
}
int main(int argc, char** argv) {
// same as above ...
} |
a12652278f6990b1d0e106e05378ec13947f866f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "billow.cuh"
#include "..\..\cpp\modules\generators\Billow.h"
__device__ float billow2D(float2 point, hipTextureObject_t perm, hipTextureObject_t grad, float freq, float lacun, float persist, int init_seed, int octaves) {
// Will be incremented upon.
float result = 0.0f;
float amplitude = 1.0f;
float val;
// Scale point by freq
point.x = point.x * freq;
point.y = point.y * freq;
// TODO: Seeding the function is currently pointless and doesn't actually do anything.
// Use loop for octav-ing
for (size_t i = 0; i < octaves; ++i) {
int seed = (init_seed + i) & 0xffffffff;
val = perlin2d(perm, grad, point, seed);
val = fabsf(val);
result += val * amplitude;
// Modify vars for next octave.
freq *= lacun;
point.x *= freq;
point.y *= freq;
amplitude *= persist;
}
float tmp = result / 100.0f;
// * //
return tmp;
}
__device__ float billow2D_S(float2 point, hipTextureObject_t perm, hipTextureObject_t grad, float freq, float lacun, float persist, int init_seed, int octaves) {
float result = 0.0f;
float amplitude = 1.0f;
float val;
// Scale starting point by frequency.
point.x = point.x * freq;
point.y = point.y * freq;
// Use loop for fractal octave bit
for (size_t i = 0; i < octaves; ++i) {
int seed = (init_seed + i) & 0xffffffff;
val = simplex2d(perm, grad, point, seed);
val = fabsf(val);
result += val * amplitude;
freq *= lacun;
point.x *= freq;
point.y *= freq;
amplitude *= persist;
}
//result /= 100.0f;
return result;
}
__global__ void Billow2DKernel(hipSurfaceObject_t out, hipTextureObject_t perm, hipTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height) {
return;
}
float x, y;
x = i + origin.x;
y = j + origin.y;
float2 p = make_float2(x, y);
// Call billow function
float val = billow2D(p, perm, grad, freq, lacun, persist, seed, octaves);
// Write val to the surface
surf2Dwrite(val, out, i * sizeof(float), j);
}
__global__ void Billow2DKernelSimplex(hipSurfaceObject_t out, hipTextureObject_t perm, hipTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height) {
return;
}
float x, y;
x = i + origin.x;
y = j + origin.y;
float2 p = make_float2(x, y);
// Call billow function
float val = billow2D_S(p, perm, grad, freq, lacun, persist, seed, octaves);
// Write val to the surface
surf2Dwrite(val, out, i * sizeof(float), j);
}
void BillowLauncher(hipSurfaceObject_t out, hipTextureObject_t perm, hipTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves) {
#ifdef CUDA_TIMING_TESTS
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
#endif // CUDA_TIMING_TESTS
// Setup dimensions of kernel launch.
// Use occupancy calc to find optimal sizes.
int blockSize, minGridSize;
#ifdef CUDA_TIMING_TESTS
hipEventRecord(start);
#endif // CUDA_TIMING_TESTS
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)Billow2DKernel, 0, 0);
dim3 block(blockSize, blockSize, 1);
dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1);
hipLaunchKernelGGL(( Billow2DKernel), dim3(block),dim3(grid), 0, 0, out, perm, grad, width, height, origin, freq, lacun, persist, seed, octaves);
// Check for successful kernel launch
cudaAssert(hipGetLastError());
// Synchronize device
cudaAssert(hipDeviceSynchronize());
#ifdef CUDA_TIMING_TESTS
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed = 0.0f;
hipEventElapsedTime(&elapsed, start, stop);
printf("Kernel execution time in ms: %f\n", elapsed);
#endif // CUDA_TIMING_TESTS
// If this completes, kernel is done and "output" contains correct data.
}
void BillowSimplexLauncher(hipSurfaceObject_t out, hipTextureObject_t perm, hipTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves){
#ifdef CUDA_TIMING_TESTS
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
#endif // CUDA_TIMING_TESTS
// Setup dimensions of kernel launch.
// Use occupancy calc to find optimal sizes.
int blockSize, minGridSize;
#ifdef CUDA_TIMING_TESTS
hipEventRecord(start);
#endif // CUDA_TIMING_TESTS
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)Billow2DKernel, 0, 0);
dim3 block(blockSize, blockSize, 1);
dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1);
hipLaunchKernelGGL(( Billow2DKernelSimplex), dim3(block), dim3(grid), 0, 0, out, perm, grad, width, height, origin, freq, lacun, persist, seed, octaves);
// Check for successful kernel launch
cudaAssert(hipGetLastError());
// Synchronize device
cudaAssert(hipDeviceSynchronize());
#ifdef CUDA_TIMING_TESTS
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed = 0.0f;
hipEventElapsedTime(&elapsed, start, stop);
printf("Kernel execution time in ms: %f\n", elapsed);
#endif // CUDA_TIMING_TESTS
// If this completes, kernel is done and "output" contains correct data.
}
| a12652278f6990b1d0e106e05378ec13947f866f.cu | #include "billow.cuh"
#include "..\..\cpp\modules\generators\Billow.h"
__device__ float billow2D(float2 point, cudaTextureObject_t perm, cudaTextureObject_t grad, float freq, float lacun, float persist, int init_seed, int octaves) {
// Will be incremented upon.
float result = 0.0f;
float amplitude = 1.0f;
float val;
// Scale point by freq
point.x = point.x * freq;
point.y = point.y * freq;
// TODO: Seeding the function is currently pointless and doesn't actually do anything.
// Use loop for octav-ing
for (size_t i = 0; i < octaves; ++i) {
int seed = (init_seed + i) & 0xffffffff;
val = perlin2d(perm, grad, point, seed);
val = fabsf(val);
result += val * amplitude;
// Modify vars for next octave.
freq *= lacun;
point.x *= freq;
point.y *= freq;
amplitude *= persist;
}
float tmp = result / 100.0f;
// * //
return tmp;
}
__device__ float billow2D_S(float2 point, cudaTextureObject_t perm, cudaTextureObject_t grad, float freq, float lacun, float persist, int init_seed, int octaves) {
float result = 0.0f;
float amplitude = 1.0f;
float val;
// Scale starting point by frequency.
point.x = point.x * freq;
point.y = point.y * freq;
// Use loop for fractal octave bit
for (size_t i = 0; i < octaves; ++i) {
int seed = (init_seed + i) & 0xffffffff;
val = simplex2d(perm, grad, point, seed);
val = fabsf(val);
result += val * amplitude;
freq *= lacun;
point.x *= freq;
point.y *= freq;
amplitude *= persist;
}
//result /= 100.0f;
return result;
}
__global__ void Billow2DKernel(cudaSurfaceObject_t out, cudaTextureObject_t perm, cudaTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height) {
return;
}
float x, y;
x = i + origin.x;
y = j + origin.y;
float2 p = make_float2(x, y);
// Call billow function
float val = billow2D(p, perm, grad, freq, lacun, persist, seed, octaves);
// Write val to the surface
surf2Dwrite(val, out, i * sizeof(float), j);
}
__global__ void Billow2DKernelSimplex(cudaSurfaceObject_t out, cudaTextureObject_t perm, cudaTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height) {
return;
}
float x, y;
x = i + origin.x;
y = j + origin.y;
float2 p = make_float2(x, y);
// Call billow function
float val = billow2D_S(p, perm, grad, freq, lacun, persist, seed, octaves);
// Write val to the surface
surf2Dwrite(val, out, i * sizeof(float), j);
}
void BillowLauncher(cudaSurfaceObject_t out, cudaTextureObject_t perm, cudaTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves) {
#ifdef CUDA_TIMING_TESTS
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
#endif // CUDA_TIMING_TESTS
// Setup dimensions of kernel launch.
// Use occupancy calc to find optimal sizes.
int blockSize, minGridSize;
#ifdef CUDA_TIMING_TESTS
cudaEventRecord(start);
#endif // CUDA_TIMING_TESTS
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)Billow2DKernel, 0, 0);
dim3 block(blockSize, blockSize, 1);
dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1);
Billow2DKernel<<<block,grid>>>(out, perm, grad, width, height, origin, freq, lacun, persist, seed, octaves);
// Check for successful kernel launch
cudaAssert(cudaGetLastError());
// Synchronize device
cudaAssert(cudaDeviceSynchronize());
#ifdef CUDA_TIMING_TESTS
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed = 0.0f;
cudaEventElapsedTime(&elapsed, start, stop);
printf("Kernel execution time in ms: %f\n", elapsed);
#endif // CUDA_TIMING_TESTS
// If this completes, kernel is done and "output" contains correct data.
}
void BillowSimplexLauncher(cudaSurfaceObject_t out, cudaTextureObject_t perm, cudaTextureObject_t grad, int width, int height, float2 origin, float freq, float lacun, float persist, int seed, int octaves){
#ifdef CUDA_TIMING_TESTS
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
#endif // CUDA_TIMING_TESTS
// Setup dimensions of kernel launch.
// Use occupancy calc to find optimal sizes.
int blockSize, minGridSize;
#ifdef CUDA_TIMING_TESTS
cudaEventRecord(start);
#endif // CUDA_TIMING_TESTS
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)Billow2DKernel, 0, 0);
dim3 block(blockSize, blockSize, 1);
dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1);
Billow2DKernelSimplex<<<block, grid>>>(out, perm, grad, width, height, origin, freq, lacun, persist, seed, octaves);
// Check for successful kernel launch
cudaAssert(cudaGetLastError());
// Synchronize device
cudaAssert(cudaDeviceSynchronize());
#ifdef CUDA_TIMING_TESTS
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed = 0.0f;
cudaEventElapsedTime(&elapsed, start, stop);
printf("Kernel execution time in ms: %f\n", elapsed);
#endif // CUDA_TIMING_TESTS
// If this completes, kernel is done and "output" contains correct data.
}
|
34c128e7e2d841903142cd91c3604ffd1739c011.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/vol2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
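// Transposed convolution output size, per spatial dim:
// out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding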
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& finput,
Tensor& fgrad_input) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
Tensor columns = finput;
Tensor ones = fgrad_input;
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4},
columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
int is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 3 ||
ones.size(0) * ones.size(1) * ones.size(2) <
output_depth * output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_depth, output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
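// Effectively computes columns = weight^T * input_n, with weight viewed as
// (k = n_input_plane) x (m = n_output_plane*kD*kH*kW) and input_n flattened
// to k x (n = input_depth*input_height*input_width).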
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into the output:
at::native::col2vol<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
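// This adds the bias as an outer product: output_n[c, :] += bias[c] * ones[:],
// broadcasting each channel's bias over every output voxel.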
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& finput,
const Tensor& fgrad_input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
Tensor grad_columns = finput;
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Resize temporary columns
grad_columns.resize_(
{n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
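// (vol2col on grad_output is the adjoint of the col2vol scatter used in the
// forward pass: for every input voxel it gathers the kernel-sized window of
// grad_output values that this voxel contributed to.)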
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
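// Row-major view: grad_input_n[m x n] = weight[m x k] * grad_columns[k x n],
// with m = n_input_plane, k = n_output_plane*kD*kH*kW, n = iD*iH*iW.
// The BLAS gemm is column-major, so the row-major product C = A*B is obtained
// as C^T = B^T * A^T, i.e. by passing the operands in reverse order.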
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
grad_columns.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& finput,
const Tensor& fgrad_input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
Tensor columns = finput;
Tensor ones = fgrad_input;
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 3 ||
ones.size(0) * ones.size(1) * ones.size(2) <
output_depth * output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_depth, output_height, output_width});
ones.fill_(1);
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kt * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
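// Row-major view: grad_weight[m x n] += scale * input_n[m x k] * columns^T[k x n],
// with m = n_input_plane, n = n_output_plane*kD*kH*kW, k = iD*iH*iW,
// where columns = vol2col(grad_output_n). The 't'/'n' flags and reversed
// operand order express this product for a column-major gemm.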
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n,
m,
k,
scale,
columns.data_ptr<scalar_t>(),
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_depth * output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
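// grad_bias[c] += scale * (sum over D*H*W of grad_output_n[c, :]);
// multiplying by the all-ones vector implements the spatial reduction.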
at::cuda::blas::gemv<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
static_cast<scalar_t>(1),
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
Tensor& slow_conv_transpose3d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor finput = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor fgrad = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
finput,
fgrad);
return output;
}
Tensor slow_conv_transpose3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor finput = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor fgrad = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
finput,
fgrad);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& finput,
const Tensor& fgrad) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& finput,
const Tensor& fgrad,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| 34c128e7e2d841903142cd91c3604ffd1739c011.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/vol2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& finput,
Tensor& fgrad_input) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
Tensor columns = finput;
Tensor ones = fgrad_input;
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4},
columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
int is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 3 ||
ones.size(0) * ones.size(1) * ones.size(2) <
output_depth * output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_depth, output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
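// Row-major view: columns[m x n] = weight^T[m x k] * input_n[k x n],
// with m = n_output_plane*kD*kH*kW, k = n_input_plane, n = iD*iH*iW.
// Each column holds the kernel-weighted contributions of one input voxel;
// col2vol below scatter-adds them into the (larger) output volume.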
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
at::native::col2vol<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
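// Adding the bias is expressed as a rank-1 update:
// output_n[c, :] += bias[c] * ones[:], broadcasting each channel's bias
// over all output_depth*output_height*output_width positions.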
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& finput,
const Tensor& fgrad_input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
Tensor grad_columns = finput;
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Resize temporary columns
grad_columns.resize_(
{n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
grad_columns.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& finput,
const Tensor& fgrad_input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
Tensor columns = finput;
Tensor ones = fgrad_input;
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 3 ||
ones.size(0) * ones.size(1) * ones.size(2) <
output_depth * output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_depth, output_height, output_width});
ones.fill_(1);
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kt * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n,
m,
k,
scale,
columns.data_ptr<scalar_t>(),
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_depth * output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
static_cast<scalar_t>(1),
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
Tensor& slow_conv_transpose3d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor finput = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor fgrad = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
finput,
fgrad);
return output;
}
Tensor slow_conv_transpose3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor finput = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor fgrad = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
finput,
fgrad);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& finput,
const Tensor& fgrad) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& finput,
const Tensor& fgrad,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
finput,
fgrad,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
1784cb997d6d92cf2488c02c71bc879cc162c13f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include "sha1.h"
__global__ void computeHMAC_SHA1(char *buf, char *keys, uint32_t *offsets, uint16_t *lengths, uint32_t *outputs, int N, uint8_t *checkbits)
{
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;
hash_digest_t h;
uint32_t offset = offsets[index];
uint16_t length = lengths[index];
uint32_t *out = outputs + 5 * index;
for (unsigned i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*)(keys + 64 * index));
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*)w, w, 0, 64, h);
//SHA1 compute on message
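// SHA-1 padding appends one 0x80 byte plus an 8-byte bit-length field,
// so the padded message needs ceil((length + 9) / 64) 64-byte blocks.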
unsigned num_iter = (length + 63 + 9) / 64;
for (unsigned i = 0; i < num_iter; i++)
computeSHA1Block(buf + offset , w, i * 64 , length , h);
*(out) = swap(h.h1);
*(out+1) = swap(h.h2);
*(out+2) = swap(h.h3);
*(out+3) = swap(h.h4);
*(out+4) = swap(h.h5);
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
for (unsigned i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*)(keys + 64 * index));
//SHA 1 compute on opads
computeSHA1Block((char*)w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m)
computeSHA1Block((char*)out, w, 0, 20, h);
*(out) = swap(h.h1);
*(out+1) = swap(h.h2);
*(out+2) = swap(h.h3);
*(out+3) = swap(h.h4);
*(out+4) = swap(h.h5);
}
__syncthreads();
if (threadIdx.x == 0)
*(checkbits + blockIdx.x) = 1;
}
extern "C" void hmac_sha1_gpu(char *buf, char *keys, uint32_t *offsets, uint16_t *lengths,
uint32_t *outputs, int N, uint8_t *checkbits,
unsigned threads_per_blk, hipStream_t stream)
{
int num_blks = (N + threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
hipLaunchKernelGGL(( computeHMAC_SHA1), dim3(num_blks), dim3(threads_per_blk), 0, 0,
buf, keys, offsets, lengths, outputs, N, checkbits);
} else {
hipLaunchKernelGGL(( computeHMAC_SHA1), dim3(num_blks), dim3(threads_per_blk), 0, stream,
buf, keys, offsets, lengths, outputs, N, checkbits);
}
}
| 1784cb997d6d92cf2488c02c71bc879cc162c13f.cu | #include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include "sha1.h"
__global__ void computeHMAC_SHA1(char *buf, char *keys, uint32_t *offsets, uint16_t *lengths, uint32_t *outputs, int N, uint8_t *checkbits)
{
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;
hash_digest_t h;
uint32_t offset = offsets[index];
uint16_t length = lengths[index];
uint32_t *out = outputs + 5 * index;
for (unsigned i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*)(keys + 64 * index));
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*)w, w, 0, 64, h);
//SHA1 compute on message
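// Padding adds 0x80 plus an 8-byte length field, hence ceil((length + 9) / 64) blocks.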
unsigned num_iter = (length + 63 + 9) / 64;
for (unsigned i = 0; i < num_iter; i++)
computeSHA1Block(buf + offset , w, i * 64 , length , h);
*(out) = swap(h.h1);
*(out+1) = swap(h.h2);
*(out+2) = swap(h.h3);
*(out+3) = swap(h.h4);
*(out+4) = swap(h.h5);
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
for (unsigned i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*)(keys + 64 * index));
//SHA 1 compute on opads
computeSHA1Block((char*)w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m)
computeSHA1Block((char*)out, w, 0, 20, h);
*(out) = swap(h.h1);
*(out+1) = swap(h.h2);
*(out+2) = swap(h.h3);
*(out+3) = swap(h.h4);
*(out+4) = swap(h.h5);
}
__syncthreads();
if (threadIdx.x == 0)
*(checkbits + blockIdx.x) = 1;
}
extern "C" void hmac_sha1_gpu(char *buf, char *keys, uint32_t *offsets, uint16_t *lengths,
uint32_t *outputs, int N, uint8_t *checkbits,
unsigned threads_per_blk, cudaStream_t stream)
{
int num_blks = (N + threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
computeHMAC_SHA1<<<num_blks, threads_per_blk>>>(
buf, keys, offsets, lengths, outputs, N, checkbits);
} else {
computeHMAC_SHA1<<<num_blks, threads_per_blk, 0, stream>>>(
buf, keys, offsets, lengths, outputs, N, checkbits);
}
}
|
4d317f0baa00d48b23d903bbbdf139e9554d03aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
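// Each output cell (ph, pw) pools over a bin of roughly
// (roi_height / pooled_height) x (roi_width / pooled_width) feature-map pixels;
// the floor/ceil below give the bin's [hstart, hend) x [wstart, wend) extent.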
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
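// count may need more than kMaxGridDim blocks, so blocks are spread over a 2-D
// grid; the kernel indexes with blockIdx.x + blockIdx.y * gridDim.x and strides
// by blockDim.x * gridDim.x * gridDim.y.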
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, bottom_data, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
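// Max pooling routes gradient only through the argmax: element (h, w) receives
// top_diff[ph, pw] for every bin whose recorded argmax index equals
// h * width + w. The feasible (ph, pw) range is found by inverting the forward
// bin mapping.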
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, top_diff, argmax_data, num_rois, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 4d317f0baa00d48b23d903bbbdf139e9554d03aa.cu | /*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
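// Each output cell (ph, pw) pools over a bin of about
// (roi_height / pooled_height) x (roi_width / pooled_width) pixels, bounded by
// the floor/ceil computations below and clipped to the feature map.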
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
ROIPoolForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, bottom_data, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
ROIPoolBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, top_diff, argmax_data, num_rois, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
e0026b156b52afbc1360ddabeb4cfc934687f1d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "params.hpp"
#include "indices.hpp"
// Nearest lower power of 2
__device__ __inline__ uint flp2 (uint x)
{
return (0x80000000u >> __clz(x));
}
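/*
Worked example of the bit trick above (illustrative only): for x = 19 (binary 10011) the
32-bit word has 27 leading zeros, so 0x80000000u >> 27 = 16, the nearest lower power of 2;
for x already a power of 2 the value is returned unchanged (flp2(16) = 16). The expression
assumes x > 0.
*/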
//Computes the squared difference between two numbers
template<typename T>
__device__ __inline__ T L2p2(const T i1, const T i2)
{
T diff = i1 - i2;
return diff*diff;
}
/*
Adds new patch to patch stack (only N most similar are kept)
Note: Stack is just an array, not FIFO
*/
__device__
void add_to_matched_image(
uint *stack, //IN/OUT: Stack of N patches matched to current reference patch
uchar *num_patches_in_stack,//IN/OUT: Number of patches in stack
const uint value, //IN: [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
const Params & params //IN: Denoising parameters
)
{
//stack[*num_patches_in_stack-1] is most similar (lowest number)
int k;
uchar num = (*num_patches_in_stack);
if (num < params.N) //add new value
{
k = num++;
while(k > 0 && value > stack[k-1])
{
stack[k] = stack[k-1];
--k;
}
stack[k] = value;
*num_patches_in_stack = num;
}
else if (value >= stack[0])
return;
else //delete highest value and add new
{
k = 1;
while (k < params.N && value < stack[k])
{
stack[k-1] = stack[k];
k++;
}
stack[k-1] = value;
}
}
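/*
A minimal decoding sketch (not called by any kernel in this file; the helper name is
illustrative only): the packed match value described above,
[..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..], can be turned back into a relative
patch displacement as follows. It also works on the 16-bit entries later written to g_stacks,
since only the low 16 bits are read.
*/
__device__ __inline__ int2 unpack_match_offset(const uint value)
{
//casting through signed char restores the sign of the 8-bit relative offsets
int2 offset;
offset.x = (int)((signed char)(value & 0xFFu));
offset.y = (int)((signed char)((value >> 8u) & 0xFFu));
return offset;
}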
/*
Block-matching algorithm
For each processed reference patch it finds at most N similar patches that pass the distance threshold and stores them in the g_stacks array.
It also returns the number of them for each reference patch in g_num_patches_in_stack.
Used denoising parameters: n,k,N,T,p
Division: Kernel handles gridDim.y lines starting with the line passed in argument. Each block handles warpSize reference patches in line.
Each thread processes one reference patch. All the warps of a block process the same reference patches.
*/
__global__
void block_matching(
const uchar* __restrict image, //IN: Original image
ushort* __restrict g_stacks, //OUT: For each reference patch contains addresses of similar patches (patch is addressed by its top-left corner) [..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
uint* __restrict g_num_patches_in_stack, //OUT: For each reference patch contains number of similar patches
const uint2 image_dim, //IN: Image dimensions
const uint2 stacks_dim, //IN: Size of area, where reference patches could be located
const Params params, //IN: Denoising parameters
const uint2 start_point) //IN: Address of the top-left reference patch of a batch
{
//One block is processing warpSize patches (because each warp is computing distance of same warpSize patches from different displaced patches)
int tid = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
int num_warps = blockDim.x/warpSize;
//p_block denotes reference rectangle on which current cuda block is computing
uint p_rectangle_width = ((warpSize-1) * params.p) + params.k;
uint p_rectangle_start = start_point.x + blockIdx.x * warpSize * params.p;
//Shared arrays
extern __shared__ uint s_data[];
uint *s_diff = s_data; //SIZE: p_rectangle_width*num_warps
uint *s_stacks = &s_data[p_rectangle_width*num_warps]; //SIZE: params.N*num_warps*warpSize
uchar *s_patches_in_stack = (uchar*)&s_data[num_warps*(p_rectangle_width + params.N*warpSize)]; //SIZE: num_warps*warpSize
uchar *s_image_p = (uchar*)&s_patches_in_stack[num_warps*warpSize]; //SIZE: p_rectangle_width*params.k
s_diff += idx2(0, wid, p_rectangle_width);
//Initialize s_patches_in_stack to zero
s_patches_in_stack[ idx2(tid, wid, warpSize) ] = 0;
int2 p; //Address of reference patch
int2 q; //Address of patch against which the difference is computed
p.x = p_rectangle_start + (tid*params.p);
p.y = start_point.y + (blockIdx.y*params.p);
//Ensure that the bottom-most patches will be taken as reference patches regardless of the p parameter.
if (p.y >= stacks_dim.y && p.y < stacks_dim.y + params.p - 1)
p.y = stacks_dim.y - 1;
else if (p.y >= stacks_dim.y) return;
//Ensure that the right-most patches will be taken as reference patches regardless of the p parameter.
uint inner_p_x = tid*params.p;
if (p.x >= stacks_dim.x && p.x < stacks_dim.x + params.p - 1)
{
inner_p_x -= (p.x - (stacks_dim.x - 1));
p.x = stacks_dim.x - 1;
}
//Load reference patches needed by actual block to shared memory
for(int i = threadIdx.x; i < p_rectangle_width*params.k; i+=blockDim.x)
{
int sx = i % p_rectangle_width;
int sy = i / p_rectangle_width;
if (p_rectangle_start+sx >= image_dim.x) continue;
s_image_p[i] = image[idx2(p_rectangle_start+sx,p.y+sy,image_dim.x)];
}
__syncthreads();
//scale difference so that it can fit ushort
uint shift = (__clz(params.Tn) < 16u) ? 16u - (uint)__clz(params.Tn) : 0;
//Ensure that displaced patch coordinates (q) will be positive
int2 from;
from.y = (p.y - (int)params.n < 0) ? -p.y : -(int)params.n;
from.x = (((int)p_rectangle_start) - (int)params.n < 0) ? -((int)p_rectangle_start) : -(int)params.n;
from.x += wid;
//For each displacement (x,y) in n neighbourhood
for(int y = from.y; y <= (int)params.n; ++y)
{
q.y = p.y + y;
if (q.y >= stacks_dim.y) break;
for(int x = from.x; x <= (int)params.n; x += num_warps)
{
//Reference patch is always the most similar to itself (there is no need to compute it)
if (x == 0 && y == 0) continue;
//Each warp is computing the same patch with slightly different displacement.
//Compute distance of reference patch p from current patch q which is displaced by (x+tid,y)
//q_block denotes displaced rectangle which is processed by the current warp
uint q_rectangle_start = p_rectangle_start + x;
q.x = q_rectangle_start + inner_p_x;
//Compute distance for each column of reference patch
for(uint i = tid; i < p_rectangle_width && p_rectangle_start+i < image_dim.x &&
q_rectangle_start+i < image_dim.x; i+=warpSize)
{
uint dist = 0;
for(uint iy = 0; iy < params.k; ++iy)
{
dist += L2p2((int)s_image_p[ idx2(i, iy, p_rectangle_width) ],
(int)image[ idx2(q_rectangle_start+i, q.y+iy, image_dim.x) ]);
}
s_diff[i] = dist;
}
if (p.x >= stacks_dim.x || q.x >= stacks_dim.x) continue;
//Sum column distances to obtain patch distance
uint diff = 0;
for (uint i = 0; i < params.k; ++i)
diff += s_diff[inner_p_x + i];
//Distance threshold
if(diff < params.Tn)
{
uint loc_y = (uint)((q.y - p.y) & 0xFF); //relative location y (-127 to 127)
uint loc_x = (uint)((q.x - p.x) & 0xFF); //relative location x (-127 to 127)
diff >>= shift;
diff <<= 16u; // [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
diff |= (loc_y << 8u);
diff |= loc_x;
//Add current patch to s_stacks
add_to_matched_image(
&s_stacks[ params.N * idx2(tid, wid, warpSize) ],
&s_patches_in_stack[ idx2(tid, wid, warpSize) ],
diff,
params
);
}
}
}
__syncthreads();
uint batch_size = gridDim.x*warpSize;
uint block_address_x = blockIdx.x*warpSize+tid;
if (wid > 0) return;
//Select N most similar patches for each reference patch from stacks in shared memory and save them to global memory
//Each thread represents one reference patch
//Each thread will find N most similar blocks in num_warps stacks (which were computed by different warps) and save them into global memory
//In shared memory the most similar patch is at the end, in global memory the order does not matter
//DEV: performance impact cca 8%
if (p.x >= stacks_dim.x) return;
int j;
for (j = 0; j < params.N; ++j)
{
uint count = 0;
uint minIdx = 0;
uint minVal = 0xFFFFFFFF; //INF
//Finds patch with minimal value of remaining
for (int i = minIdx; i < num_warps; ++i)
{
count = (uint)s_patches_in_stack[ idx2(tid, i, warpSize) ];
if (count == 0) continue;
uint newMinVal = s_stacks[ idx3(count-1,tid,i,params.N,warpSize) ];
if (newMinVal < minVal)
{
minVal = newMinVal;
minIdx = i;
}
}
if (minVal == 0xFFFFFFFF) break; //All stacks are empty
//Remove patch from shared stack
s_patches_in_stack[ idx2(tid, minIdx, warpSize) ]--;
//Adds patch to stack in global memory
g_stacks[idx3(j, block_address_x, blockIdx.y, params.N, batch_size)] = (ushort)(minVal & 0xFFFF);
}
//Save to the global memory the number of similar patches rounded to the nearest lower power of two
g_num_patches_in_stack[ idx2(block_address_x ,blockIdx.y, batch_size) ] = flp2((uint)j+1)-1;
}
extern "C" void run_block_matching(
const uchar* __restrict image, //Original image
ushort* __restrict stacks, //For each reference patch contains addresses of similar patches (patch is addressed by its top-left corner)
uint* __restrict num_patches_in_stack, //For each reference patch contains number of similar patches
const uint2 image_dim, //Image dimensions
const uint2 stacks_dim, //size of area where reference patches could be located
const Params params, //Denoising parameters
const uint2 start_point, //Address of the top-left reference patch of a batch
const dim3 num_threads,
const dim3 num_blocks,
const uint shared_memory_size
)
{
hipLaunchKernelGGL(( block_matching), dim3(num_blocks), dim3(num_threads),shared_memory_size, 0,
image,
stacks,
num_patches_in_stack,
image_dim,
stacks_dim,
params,
start_point
);
}
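/*
A minimal host-side sketch of how a caller could derive the shared_memory_size argument of
run_block_matching from the SIZE comments on s_diff, s_stacks, s_patches_in_stack and
s_image_p inside the kernel. This helper is an assumption added for illustration, not part of
the interface used elsewhere; warp_size normally comes from the device properties (32 on
current hardware) and num_warps is num_threads.x / warp_size.
*/
static inline unsigned int block_matching_shared_bytes(const Params &params,
const unsigned int warp_size, const unsigned int num_warps)
{
const unsigned int p_rectangle_width = ((warp_size - 1) * params.p) + params.k;
const unsigned int n_uints = p_rectangle_width * num_warps //s_diff
+ params.N * num_warps * warp_size; //s_stacks
const unsigned int n_uchars = num_warps * warp_size //s_patches_in_stack
+ p_rectangle_width * params.k; //s_image_p
return n_uints * (unsigned int)sizeof(unsigned int) + n_uchars;
}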
| e0026b156b52afbc1360ddabeb4cfc934687f1d9.cu | #include <cuda.h>
#include "params.hpp"
#include "indices.hpp"
// Nearest lower power of 2
__device__ __inline__ uint flp2 (uint x)
{
return (0x80000000u >> __clz(x));
}
//Computes the squared difference between two numbers
template<typename T>
__device__ __inline__ T L2p2(const T i1, const T i2)
{
T diff = i1 - i2;
return diff*diff;
}
/*
Adds new patch to patch stack (only N most similar are kept)
Note: Stack is just an array, not FIFO
*/
__device__
void add_to_matched_image(
uint *stack, //IN/OUT: Stack of N patches matched to current reference patch
uchar *num_patches_in_stack,//IN/OUT: Number of patches in stack
const uint value, //IN: [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
const Params & params //IN: Denoising parameters
)
{
//stack[*num_patches_in_stack-1] is most similar (lowest number)
int k;
uchar num = (*num_patches_in_stack);
if (num < params.N) //add new value
{
k = num++;
while(k > 0 && value > stack[k-1])
{
stack[k] = stack[k-1];
--k;
}
stack[k] = value;
*num_patches_in_stack = num;
}
else if (value >= stack[0])
return;
else //delete highest value and add new
{
k = 1;
while (k < params.N && value < stack[k])
{
stack[k-1] = stack[k];
k++;
}
stack[k-1] = value;
}
}
/*
Block-matching algorithm
For each processed reference patch it finds at most N similar patches that pass the distance threshold and stores them in the g_stacks array.
It also returns the number of them for each reference patch in g_num_patches_in_stack.
Used denoising parameters: n,k,N,T,p
Division: Kernel handles gridDim.y lines starting with the line passed in argument. Each block handles warpSize reference patches in line.
Each thread processes one reference patch. All the warps of a block process the same reference patches.
*/
__global__
void block_matching(
const uchar* __restrict image, //IN: Original image
ushort* __restrict g_stacks, //OUT: For each reference patch contains addresses of similar patches (patch is addressed by its top-left corner) [..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
uint* __restrict g_num_patches_in_stack, //OUT: For each reference patch contains number of similar patches
const uint2 image_dim, //IN: Image dimensions
const uint2 stacks_dim, //IN: Size of area, where reference patches could be located
const Params params, //IN: Denoising parameters
const uint2 start_point) //IN: Address of the top-left reference patch of a batch
{
//One block is processing warpSize patches (because each warp is computing distance of same warpSize patches from different displaced patches)
int tid = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
int num_warps = blockDim.x/warpSize;
//p_block denotes reference rectangle on which current cuda block is computing
uint p_rectangle_width = ((warpSize-1) * params.p) + params.k;
uint p_rectangle_start = start_point.x + blockIdx.x * warpSize * params.p;
//Shared arrays
extern __shared__ uint s_data[];
uint *s_diff = s_data; //SIZE: p_rectangle_width*num_warps
uint *s_stacks = &s_data[p_rectangle_width*num_warps]; //SIZE: params.N*num_warps*warpSize
uchar *s_patches_in_stack = (uchar*)&s_data[num_warps*(p_rectangle_width + params.N*warpSize)]; //SIZE: num_warps*warpSize
uchar *s_image_p = (uchar*)&s_patches_in_stack[num_warps*warpSize]; //SIZE: p_rectangle_width*params.k
s_diff += idx2(0, wid, p_rectangle_width);
//Initialize s_patches_in_stack to zero
s_patches_in_stack[ idx2(tid, wid, warpSize) ] = 0;
int2 p; //Address of reference patch
int2 q; //Address of patch against which the difference is computed
p.x = p_rectangle_start + (tid*params.p);
p.y = start_point.y + (blockIdx.y*params.p);
//Ensure that the bottom-most patches will be taken as reference patches regardless of the p parameter.
if (p.y >= stacks_dim.y && p.y < stacks_dim.y + params.p - 1)
p.y = stacks_dim.y - 1;
else if (p.y >= stacks_dim.y) return;
//Ensure that the right-most patches will be taken as reference patches regardless of the p parameter.
uint inner_p_x = tid*params.p;
if (p.x >= stacks_dim.x && p.x < stacks_dim.x + params.p - 1)
{
inner_p_x -= (p.x - (stacks_dim.x - 1));
p.x = stacks_dim.x - 1;
}
//Load reference patches needed by actual block to shared memory
for(int i = threadIdx.x; i < p_rectangle_width*params.k; i+=blockDim.x)
{
int sx = i % p_rectangle_width;
int sy = i / p_rectangle_width;
if (p_rectangle_start+sx >= image_dim.x) continue;
s_image_p[i] = image[idx2(p_rectangle_start+sx,p.y+sy,image_dim.x)];
}
__syncthreads();
//scale difference so that it can fit ushort
uint shift = (__clz(params.Tn) < 16u) ? 16u - (uint)__clz(params.Tn) : 0;
//Ensure that displaced patch coordinates (q) will be positive
int2 from;
from.y = (p.y - (int)params.n < 0) ? -p.y : -(int)params.n;
from.x = (((int)p_rectangle_start) - (int)params.n < 0) ? -((int)p_rectangle_start) : -(int)params.n;
from.x += wid;
//For each displacement (x,y) in n neighbourhood
for(int y = from.y; y <= (int)params.n; ++y)
{
q.y = p.y + y;
if (q.y >= stacks_dim.y) break;
for(int x = from.x; x <= (int)params.n; x += num_warps)
{
//Reference patch is always the most similar to itself (there is no need to compute it)
if (x == 0 && y == 0) continue;
//Each warp is computing the same patch with slightly different displacement.
//Compute distance of reference patch p from current patch q which is displaced by (x+tid,y)
//q_block denotes displaced rectangle which is processed by the current warp
uint q_rectangle_start = p_rectangle_start + x;
q.x = q_rectangle_start + inner_p_x;
//Compute distance for each column of reference patch
for(uint i = tid; i < p_rectangle_width && p_rectangle_start+i < image_dim.x &&
q_rectangle_start+i < image_dim.x; i+=warpSize)
{
uint dist = 0;
for(uint iy = 0; iy < params.k; ++iy)
{
dist += L2p2((int)s_image_p[ idx2(i, iy, p_rectangle_width) ],
(int)image[ idx2(q_rectangle_start+i, q.y+iy, image_dim.x) ]);
}
s_diff[i] = dist;
}
if (p.x >= stacks_dim.x || q.x >= stacks_dim.x) continue;
//Sum column distances to obtain patch distance
uint diff = 0;
for (uint i = 0; i < params.k; ++i)
diff += s_diff[inner_p_x + i];
//Distance threshold
if(diff < params.Tn)
{
uint loc_y = (uint)((q.y - p.y) & 0xFF); //relative location y (-127 to 127)
uint loc_x = (uint)((q.x - p.x) & 0xFF); //relative location x (-127 to 127)
diff >>= shift;
diff <<= 16u; // [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
diff |= (loc_y << 8u);
diff |= loc_x;
//Add current patch to s_stacks
add_to_matched_image(
&s_stacks[ params.N * idx2(tid, wid, warpSize) ],
&s_patches_in_stack[ idx2(tid, wid, warpSize) ],
diff,
params
);
}
}
}
__syncthreads();
uint batch_size = gridDim.x*warpSize;
uint block_address_x = blockIdx.x*warpSize+tid;
if (wid > 0) return;
//Select N most similar patches for each reference patch from stacks in shared memory and save them to global memory
//Each thread represents one reference patch
//Each thread will find N most similar blocks in num_warps stacks (which were computed by different warps) and save them into global memory
//In shared memory the most similar patch is at the end, in global memory the order does not matter
//DEV: performance impact cca 8%
if (p.x >= stacks_dim.x) return;
int j;
for (j = 0; j < params.N; ++j)
{
uint count = 0;
uint minIdx = 0;
uint minVal = 0xFFFFFFFF; //INF
//Finds patch with minimal value of remaining
for (int i = minIdx; i < num_warps; ++i)
{
count = (uint)s_patches_in_stack[ idx2(tid, i, warpSize) ];
if (count == 0) continue;
uint newMinVal = s_stacks[ idx3(count-1,tid,i,params.N,warpSize) ];
if (newMinVal < minVal)
{
minVal = newMinVal;
minIdx = i;
}
}
if (minVal == 0xFFFFFFFF) break; //All stacks are empty
//Remove patch from shared stack
s_patches_in_stack[ idx2(tid, minIdx, warpSize) ]--;
//Adds patch to stack in global memory
g_stacks[idx3(j, block_address_x, blockIdx.y, params.N, batch_size)] = (ushort)(minVal & 0xFFFF);
}
//Save to the global memory the number of similar patches rounded to the nearest lower power of two
g_num_patches_in_stack[ idx2(block_address_x ,blockIdx.y, batch_size) ] = flp2((uint)j+1)-1;
}
extern "C" void run_block_matching(
const uchar* __restrict image, //Original image
ushort* __restrict stacks, //For each reference patch contains addresses of similar patches (patch is addressed by its top-left corner)
uint* __restrict num_patches_in_stack, //For each reference patch contains number of similar patches
const uint2 image_dim, //Image dimensions
const uint2 stacks_dim, //size of area where reference patches could be located
const Params params, //Denoising parameters
const uint2 start_point, //Address of the top-left reference patch of a batch
const dim3 num_threads,
const dim3 num_blocks,
const uint shared_memory_size
)
{
block_matching<<<num_blocks, num_threads,shared_memory_size>>>(
image,
stacks,
num_patches_in_stack,
image_dim,
stacks_dim,
params,
start_point
);
}
|
d2b3df36037324d57c03d9cba1e1addf090b89c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "math_functions.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
__global__ void dVdM_hiddenlayer( double *dVdinput, const double *dVdO,const double *h, const int* t_alter, const double *param, const int* int_param)
{
int neuron_id = blockIdx.x*blockDim.x + threadIdx.x;
if(neuron_id > int_param[1] - 1 ) {return;}
int last_id = blockIdx.y*blockDim.y + threadIdx.y;
if (last_id >int_param[2] - 1) {return;}
// int T_id = blockIdx.x;
// if(T_id > t_alter[last_id]-1-T_id) {return;}
//determine the id of the thread
__shared__ int T_size;
// __shared__ int T_max;//T_max must be higher than 1
__shared__ int nNeurons[2];// cur, last(need to be altered)
__shared__ int maxsteps;
__shared__ double threshold;
__shared__ double decay1;
__shared__ double decay2;
// __shared__ double V_0;
T_size = int_param[0];
// T_max = t_alter[last_id];
nNeurons[0] = int_param[1];
nNeurons[1] = int_param[2];
maxsteps = int_param[3];
// nNeurons[2] = int_param[4];
threshold = param[0];
decay1 = param[1];
decay2 = param[2];
// V_0 = param[3];
int i = t_alter[last_id] - 1;
int step = maxsteps;
//to compute all dV_(neuron_id)^(t_j)/dM(S)_(neuron_id)^(T_id) T_id < t_j <T_max
last_id = (last_id*nNeurons[0]+ neuron_id)*T_size;// int dV_startid = (last_id*nNeurons[0]+ neuron_id)*T_size;
neuron_id = neuron_id*T_size;// int h_startid = neuron_id*T_size;
double tmp = dVdO[last_id+i]*h[neuron_id+i];
double tmp2 = 0;
double dM = 1*tmp;
double dS = -1*tmp;
double dE = -1*tmp;
// double dE = -1;
while((i>=0)&&(step>=0)){
dVdinput[last_id + i] = dM+dS;
i--;
step--;
if(i<0){break;}
tmp2 = dE* h[neuron_id+i]*threshold;
tmp = dVdO[last_id+i]*h[neuron_id+i];
dM = decay1*dM+tmp;
dS = decay2*dS - tmp;
}
}
| d2b3df36037324d57c03d9cba1e1addf090b89c2.cu | #include "math_functions.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
__global__ void dVdM_hiddenlayer( double *dVdinput, const double *dVdO,const double *h, const int* t_alter, const double *param, const int* int_param)
{
int neuron_id = blockIdx.x*blockDim.x + threadIdx.x;
if(neuron_id > int_param[1] - 1 ) {return;}
int last_id = blockIdx.y*blockDim.y + threadIdx.y;
if (last_id >int_param[2] - 1) {return;}
// int T_id = blockIdx.x;
// if(T_id > t_alter[last_id]-1-T_id) {return;}
//determine the id of the thread
__shared__ int T_size;
// __shared__ int T_max;//T_max must be higher than 1
__shared__ int nNeurons[2];// cur, last(need to be altered)
__shared__ int maxsteps;
__shared__ double threshold;
__shared__ double decay1;
__shared__ double decay2;
// __shared__ double V_0;
T_size = int_param[0];
// T_max = t_alter[last_id];
nNeurons[0] = int_param[1];
nNeurons[1] = int_param[2];
maxsteps = int_param[3];
// nNeurons[2] = int_param[4];
threshold = param[0];
decay1 = param[1];
decay2 = param[2];
// V_0 = param[3];
int i = t_alter[last_id] - 1;
int step = maxsteps;
//to compute all dV_(neuron_id)^(t_j)/dM(S)_(neuron_id)^(T_id) T_id < t_j <T_max
last_id = (last_id*nNeurons[0]+ neuron_id)*T_size;// int dV_startid = (last_id*nNeurons[0]+ neuron_id)*T_size;
neuron_id = neuron_id*T_size;// int h_startid = neuron_id*T_size;
double tmp = dVdO[last_id+i]*h[neuron_id+i];
double tmp2 = 0;
double dM = 1*tmp;
double dS = -1*tmp;
double dE = -1*tmp;
// double dE = -1;
while((i>=0)&&(step>=0)){
dVdinput[last_id + i] = dM+dS;
i--;
step--;
if(i<0){break;}
tmp2 = dE* h[neuron_id+i]*threshold;
tmp = dVdO[last_id+i]*h[neuron_id+i];
dM = decay1*dM+tmp;
dS = decay2*dS - tmp;
}
}
|
0eceb977e7f8da16f62e03e7d125e87def6e01ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Cuda kernels that do the heavy work
*/
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void check_cuda(hipError_t result, char const *const func, const char *const file, int const line);
__global__ void render_init_kernel(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
} | 0eceb977e7f8da16f62e03e7d125e87def6e01ff.cu | #include "includes.h"
/*
* Cuda kernels that do the heavy work
*/
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line);
__global__ void render_init_kernel(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
} |
5edba97140422723891236ee82f8f4647d178411.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util/cuPrintf.cu"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <assert.h>
#include "mem.h"
#include "timer.h"
extern int debug;
__global__ void kernel(float *array, int n, int stride)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int start = index * stride;
int end = (index+1) * stride;
if (end > n)
end = n;
for (int i = start; i < end; i++) {
array[i] = sqrtf(array[i]);
}
cuPrintf("n %d stride %d my_id %d start %d end %d array[0]=%f\n", n, stride, index, start, end, array[0]);
}
void launch_kernel(int n_tblk, int nt_tblk, float *device, int n)
{
if (debug) cudaPrintfInit(); // initialize cuPrintf
{
Timer t("Kernel finished ");
hipLaunchKernelGGL(( kernel), dim3(n_tblk),dim3(nt_tblk), 0, 0, device, n, n/(n_tblk*nt_tblk));
hipDeviceSynchronize();
}
if (debug) {
// display the device's greeting
cudaPrintfDisplay();
// clean up after cuPrintf
cudaPrintfEnd();
}
}
void alloc_mem(float **host_array, float **device_array, int n)
{
hipError_t err = hipSetDeviceFlags(hipDeviceMapHost);
if (err != hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(err));
exit(-1);
}
hipHostMalloc(host_array, n*sizeof(float));
memset(*host_array, 0, n*sizeof(float));
// hipMalloc device memory
//hipMalloc(device_array, n* sizeof(float));
assert(hipHostGetDevicePointer(device_array, *host_array, 0) == hipSuccess);
// zero out the device array with hipMemset
hipMemset(*device_array, 0, n* sizeof(float));
}
void transfer_mem(float *device, float *host, int n, bool host2dev)
{
struct timespec t0, t1;
clock_gettime(CLOCK_REALTIME, &t0);
if (host2dev)
hipMemcpy(device, host, n* sizeof(float), hipMemcpyHostToDevice);
else
hipMemcpy(host, device, n* sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &t1);
printf("%s Transfer took %ld usec\n", host2dev?"H->D":"D->H", TIME_DIFF(t0, t1));
}
void copy_mem(float *dst, float *src, int n)
{
struct timespec t0, t1;
clock_gettime(CLOCK_REALTIME, &t0);
hipMemcpy(dst, src, n* sizeof(float), hipMemcpyDefault);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &t1);
struct hipPointerAttribute_t attr;
assert(hipPointerGetAttributes(&attr, dst)==hipSuccess);
printf("%s Transfer took %ld usec\n", (attr.memoryType == hipMemoryTypeHost)?"H->D":"D->H", TIME_DIFF(t0, t1));
}
void free_mem(float *host, float *device)
{
hipHostFree(host);
hipFree(device);
}
| 5edba97140422723891236ee82f8f4647d178411.cu | #include "util/cuPrintf.cu"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <assert.h>
#include "mem.h"
#include "timer.h"
extern int debug;
__global__ void kernel(float *array, int n, int stride)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int start = index * stride;
int end = (index+1) * stride;
if (end > n)
end = n;
for (int i = start; i < end; i++) {
array[i] = sqrtf(array[i]);
}
cuPrintf("n %d stride %d my_id %d start %d end %d array[0]=%f\n", n, stride, index, start, end, array[0]);
}
void launch_kernel(int n_tblk, int nt_tblk, float *device, int n)
{
if (debug) cudaPrintfInit(); // initialize cuPrintf
{
Timer t("Kernel finished ");
kernel<<<n_tblk,nt_tblk>>>(device, n, n/(n_tblk*nt_tblk));
cudaDeviceSynchronize();
}
if (debug) {
// display the device's greeting
cudaPrintfDisplay();
// clean up after cuPrintf
cudaPrintfEnd();
}
}
void alloc_mem(float **host_array, float **device_array, int n)
{
cudaError_t err = cudaSetDeviceFlags(cudaDeviceMapHost);
if (err != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(err));
exit(-1);
}
cudaMallocHost(host_array, n*sizeof(float));
memset(*host_array, 0, n*sizeof(float));
// cudaMalloc device memory
//cudaMalloc(device_array, n* sizeof(float));
assert(cudaHostGetDevicePointer(device_array, *host_array, 0) == cudaSuccess);
// zero out the device array with cudaMemset
cudaMemset(*device_array, 0, n* sizeof(float));
}
void transfer_mem(float *device, float *host, int n, bool host2dev)
{
struct timespec t0, t1;
clock_gettime(CLOCK_REALTIME, &t0);
if (host2dev)
cudaMemcpy(device, host, n* sizeof(float), cudaMemcpyHostToDevice);
else
cudaMemcpy(host, device, n* sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &t1);
printf("%s Transfer took %ld usec\n", host2dev?"H->D":"D->H", TIME_DIFF(t0, t1));
}
void copy_mem(float *dst, float *src, int n)
{
struct timespec t0, t1;
clock_gettime(CLOCK_REALTIME, &t0);
cudaMemcpy(dst, src, n* sizeof(float), cudaMemcpyDefault);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &t1);
struct cudaPointerAttributes attr;
assert(cudaPointerGetAttributes(&attr, dst)==cudaSuccess);
printf("%s Transfer took %ld usec\n", (attr.memoryType == cudaMemoryTypeHost)?"H->D":"D->H", TIME_DIFF(t0, t1));
}
void free_mem(float *host, float *device)
{
cudaFreeHost(host);
cudaFree(device);
}
|
116d83c4261db25b064713104c1f3b5ec010554e.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1 = 0;
float Value2 = 0;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Division access
if((i%2)==0){
for(unsigned k=0; k<iterations;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
} | 116d83c4261db25b064713104c1f3b5ec010554e.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1 = 0;
float Value2 = 0;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Division access
if((i%2)==0){
for(unsigned k=0; k<iterations;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
} |
f6499aa392979b3d911bf9587c7f3c52f73a7c30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "eye_like_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _EyeLikeKernel(
size_t offset,
size_t stripe,
T* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// offset is the linear index of the first diagonal element, stripe is width + 1.
output_data[offset + id * stripe] = static_cast<T>(1);
}
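// Worked example of the indexing above (a sketch of what a caller could pass; the values are
// not taken from the actual EyeLike op implementation): to set the k-th diagonal of a
// row-major height x width output, element (i, i + k) has linear index k + i * (width + 1),
// so for k >= 0 one would pass offset = k, stripe = width + 1, diag_count = min(height, width - k);
// for k < 0, offset = -k * width and diag_count = min(height + k, width).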
template <typename T>
void EyeLikeImpl(
size_t offset,
size_t stripe,
T* output_data,
size_t diag_count) {
constexpr int block_size = 256;
int blocksPerGrid = (int)(ceil(static_cast<float>(diag_count) / block_size));
CUDA_LONG N = static_cast<CUDA_LONG>(diag_count);
hipLaunchKernelGGL(( _EyeLikeKernel), dim3(blocksPerGrid), dim3(block_size), 0, 0, offset, stripe, output_data, N);
}
#define SPECIALIZED_IMPL(T) \
template void EyeLikeImpl<T>( \
size_t offset, \
size_t stripe, \
T* output_data, \
size_t diag_count);
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime | f6499aa392979b3d911bf9587c7f3c52f73a7c30.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "eye_like_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _EyeLikeKernel(
size_t offset,
size_t stripe,
T* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// offset is the linear index of the first diagonal element, stripe is width + 1.
output_data[offset + id * stripe] = static_cast<T>(1);
}
template <typename T>
void EyeLikeImpl(
size_t offset,
size_t stripe,
T* output_data,
size_t diag_count) {
constexpr int block_size = 256;
int blocksPerGrid = (int)(ceil(static_cast<float>(diag_count) / block_size));
CUDA_LONG N = static_cast<CUDA_LONG>(diag_count);
_EyeLikeKernel<<<blocksPerGrid, block_size, 0>>>(offset, stripe, output_data, N);
}
#define SPECIALIZED_IMPL(T) \
template void EyeLikeImpl<T>( \
size_t offset, \
size_t stripe, \
T* output_data, \
size_t diag_count);
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime |
5b4787eba15680281d8302dd2c19667e52f22e6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:NOT_ALL_VERIFIED
//--blockDim=16 --gridDim=16 --no-inline
//a = 12
//b = 36
//c = 48
__global__ void example(unsigned a, unsigned b, unsigned c) {
__requires(a == 12);
__requires(b == 36);
__assert(a + b != c);
}
| 5b4787eba15680281d8302dd2c19667e52f22e6c.cu | //xfail:NOT_ALL_VERIFIED
//--blockDim=16 --gridDim=16 --no-inline
//a = 12
//b = 36
//c = 48
__global__ void example(unsigned a, unsigned b, unsigned c) {
__requires(a == 12);
__requires(b == 36);
__assert(a + b != c);
}
|
f9c961fcd4d381e763c27b0086be530124bf5eec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILEWIDTH 32
__global__
void vecConvKernel(float* A, float* B, float* C, int n){
//identify the index of the data to be read
int tx=threadIdx.x;
int bx=blockIdx.x;
int index=bx*blockDim.x+tx;
__shared__ float Ads[TILEWIDTH];
__shared__ float Bds[2*TILEWIDTH];
//assuming n is multiple of TILEWIDTH
// if(index<n){
int i; float val=0.0;
for(i=0;i<gridDim.x-1;i++){
Ads[tx] = A[i*TILEWIDTH+tx];
Bds[tx] = B[i*TILEWIDTH+tx];
Bds[TILEWIDTH + tx] = B[(i+1)*TILEWIDTH + tx];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[k]*Bds[tx+k];
}
__syncthreads();
}
Ads[tx] = A[i*TILEWIDTH + tx];
Bds[tx] = B[i*TILEWIDTH+tx];
Bds[TILEWIDTH + tx] = B[tx];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[k]*Bds[tx+k];
}
__syncthreads();
C[index] = val;
// }
}
__host__
void vecConv(float* A,float* B,float* C, int n){
int c=ceil(n/256.0);
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
//Allocate device memory for A,B,C
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
//copy A,B to device memory
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
dim3 dimBlock(TILEWIDTH,1,1);
dim3 dimGrid(ceil(n/(float)TILEWIDTH),1,1);
//call kernal function that the calculates sum and stores it in C
hipLaunchKernelGGL(( vecConvKernel), dim3(dimGrid),dim3(dimBlock) , 0, 0, d_A,d_B,d_C,n);
//the y and z dimensions are set to 1 by default
//copy C from devce memory
hipMemcpy( C,d_C, size, hipMemcpyDeviceToHost);
//free device memories
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
//Kernal function that runs in each thread
int main(){
float *A,*B,*C;
int n=32; //kernel assumes n is a multiple of TILEWIDTH
A=(float*)malloc(n*sizeof(float));
B=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
B[i]=(float)2*i;
}
vecConv(A,B,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(B);
free(C);
return 0;
}
| f9c961fcd4d381e763c27b0086be530124bf5eec.cu | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILEWIDTH 32
__global__
void vecConvKernel(float* A, float* B, float* C, int n){
//identify the index of the data to be read
int tx=threadIdx.x;
int bx=blockIdx.x;
int index=bx*blockDim.x+tx;
__shared__ float Ads[TILEWIDTH];
__shared__ float Bds[2*TILEWIDTH];
//assuming n is multiple of TILEWIDTH
// if(index<n){
int i; float val=0.0;
for(i=0;i<gridDim.x-1;i++){
Ads[tx] = A[i*TILEWIDTH+tx];
Bds[tx] = B[i*TILEWIDTH+tx];
Bds[TILEWIDTH + tx] = B[(i+1)*TILEWIDTH + tx];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[k]*Bds[tx+k];
}
__syncthreads();
}
Ads[tx] = A[i*TILEWIDTH + tx];
Bds[tx] = B[i*TILEWIDTH+tx];
Bds[TILEWIDTH + tx] = B[tx];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[k]*Bds[tx+k];
}
__syncthreads();
C[index] = val;
// }
}
__host__
void vecConv(float* A,float* B,float* C, int n){
int c=ceil(n/256.0);
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
//Allocate device memory for A,B,C
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//copy A,B to device memory
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
dim3 dimBlock(TILEWIDTH,1,1);
dim3 dimGrid(ceil(n/(float)TILEWIDTH),1,1);
//call kernal function that the calculates sum and stores it in C
vecConvKernel<<< dimGrid,dimBlock >>>(d_A,d_B,d_C,n);
//the y and z dimensions are set to 1 by default
//copy C from devce memory
cudaMemcpy( C,d_C, size, cudaMemcpyDeviceToHost);
//free device memories
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
//Kernal function that runs in each thread
int main(){
float *A,*B,*C;
int n=32; //kernel assumes n is a multiple of TILEWIDTH
A=(float*)malloc(n*sizeof(float));
B=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
B[i]=(float)2*i;
}
vecConv(A,B,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(B);
free(C);
return 0;
}
|
e65ff4e588ad4d9c051492c47d5a5fe4bcdc8dec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void colorDistDiff_kernel(uchar4 *out_image, const float *disparity, int disparity_pitch, const float *disparity_prior, int width, int height, float f, float b, float ox, float oy, float dist_thres) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
int ind = y * width + x;
uchar4 temp = out_image[ind];
float disp = *((float *)((char *)disparity + y * disparity_pitch) + x);
float disp_model = disparity_prior[ind];
// Reconstruct in 3D and measure the Euclidean distance
float xt = __fdividef((x - ox), f);
float yt = -__fdividef((y - oy), f); // coord. transform
float Zm = -(f * b) / disp_model;
float Xm = xt * Zm;
float Ym = yt * Zm;
float Zd = -(f * b) / disp;
float Xd = xt * Zd;
float Yd = yt * Zd;
float d_md = sqrtf((Xm - Xd) * (Xm - Xd) + (Ym - Yd) * (Ym - Yd) +
(Zm - Zd) * (Zm - Zd));
bool color = (d_md > dist_thres) | (isfinite(disp) & ~isfinite(disp_model));
if (color) { // color
temp.x *= 0.5f;
temp.y *= 0.5f;
}
out_image[ind] = temp;
}
} | e65ff4e588ad4d9c051492c47d5a5fe4bcdc8dec.cu | #include "includes.h"
__global__ void colorDistDiff_kernel(uchar4 *out_image, const float *disparity, int disparity_pitch, const float *disparity_prior, int width, int height, float f, float b, float ox, float oy, float dist_thres) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
int ind = y * width + x;
uchar4 temp = out_image[ind];
float disp = *((float *)((char *)disparity + y * disparity_pitch) + x);
float disp_model = disparity_prior[ind];
// Reconstruct in 3D and measure the Euclidean distance
float xt = __fdividef((x - ox), f);
float yt = -__fdividef((y - oy), f); // coord. transform
float Zm = -(f * b) / disp_model;
float Xm = xt * Zm;
float Ym = yt * Zm;
float Zd = -(f * b) / disp;
float Xd = xt * Zd;
float Yd = yt * Zd;
float d_md = sqrtf((Xm - Xd) * (Xm - Xd) + (Ym - Yd) * (Ym - Yd) +
(Zm - Zd) * (Zm - Zd));
bool color = (d_md > dist_thres) | (isfinite(disp) & ~isfinite(disp_model));
if (color) { // color
temp.x *= 0.5f;
temp.y *= 0.5f;
}
out_image[ind] = temp;
}
} |
5371107c0face92cacfc8e17d685d7818336c366.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
hipStream_t stream;
} PerThreadData;
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
,comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
hipMalloc(&_z, (m + n) * sizeof(T));
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMalloc(&_zt, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
const T kAlpha = static_cast<T>(1.7); // set to 1.0 to disable over-relaxation technique, normally 1.5-1.8 and was set to 1.7
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // Projection tolerance
const T kProjTolMin = static_cast<T>(1e-2); // Projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // Projection tolerance
const T kProjTolIni = static_cast<T>(1e-5); // Projection tolerance
const bool use_exact_stop = true; // false does worse in trainerror and maximum number of iterations with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
// \lambda_{max} = ||A^T b|| makes sense if have (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
// 3) Cholesky of gram: Only compute the cholesky factorization once -- s and info->s in Project stay at kOne=1, and later calls just ensure the GPU already holds the factor (could have been folded into Init together with the Gramian)
// 4) Project: Solve L L^T x = b for x by forward and backward solve (Ly=b for y and then y=L^T x for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
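//
// For reference, with kAlpha = 1 one pass of the loop below is (roughly) the scaled graph-form ADMM iteration,
// writing xt,yt for the scaled dual variables stored in zt:
// x^{k+1/2} = prox_g(x^k - xt^k; rho), y^{k+1/2} = prox_f(y^k - yt^k; rho) (the two ProxEval calls)
// (x,y)^{k+1} = Proj_{y=Ax}((x,y)^{k+1/2} + (xt,yt)^k) (the _P.Project call)
// (xt,yt)^{k+1} = (xt,yt)^k + (x,y)^{k+1/2} - (x,y)^{k+1} (dual update)
// Over-relaxation (kAlpha != 1) mixes the previous iterate zprev into the projected and dual-update steps.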
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
hipblasHandle_t hdl;
hipblasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
cml::blas_scal(hdl, kZero, &ztemp); // ztemp = ztemp * kZero (avoid dividing by _rho == 0)
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
wrapcudaDeviceSynchronize(); // not needed, as blas's are cuda call and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
// LOOP until satisfy convergence criteria
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
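// The three calls below form ztemp = zt + kAlpha*z12 + (1 - kAlpha)*zprev, i.e. the relaxed point
// kAlpha*z^{k+1/2} + (1 - kAlpha)*z^k shifted by the scaled dual zt; kAlpha = 1 recovers plain ADMM.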
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / ::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = ::max(proj_tol, kProjTolMax);
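// Net effect: the projection tolerance starts at kProjTolMin (1e-2) and tightens like (k+1)^(-kProjTolPow),
// but is never pushed below kProjTolMax (1e-6), so early iterations use cheap, loose projections and
// later iterations increasingly accurate ones.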
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//hipDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
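// Heuristic: keep rolling windows (QUEUELENGTH long) of the primal/dual residual norms and of their
// running means; once the change of the windowed mean (back vs front) drops below the window's mean
// absolute deviation for BOTH residuals, the iteration is considered stalled and stopped early.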
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
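// All three variants follow the same balancing idea: increase _rho when the primal residual dominates
// the dual residual, decrease it when the dual dominates, and rescale zt by the same factor so the
// scaled dual variable stays consistent with the new _rho.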
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
// operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
hipblasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(hipSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(hipSetDevice(_wDev));
if(1){
if (_z)
hipFree(_z);
if (_zt)
hipFree(_zt);
if (_xp)
hipFree(_xp);
if (_trainPredsp)
hipFree(_trainPredsp);
if (_validPredsp)
hipFree(_validPredsp);
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
| 5371107c0face92cacfc8e17d685d7818336c366.cu | /*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
cudaStream_t stream;
} PerThreadData;
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
cudaMalloc(&_z, (m + n) * sizeof(T));
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMalloc(&_zt, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
const T kAlpha = static_cast<T>(1.7); // set to 1.0 to disable over-relaxation technique, normally 1.5-1.8 and was set to 1.7
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // Projection tolerance
const T kProjTolMin = static_cast<T>(1e-2); // Projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // Projection tolerance
const T kProjTolIni = static_cast<T>(1e-5); // Projection tolerance
const bool use_exact_stop = true; // false does worse in trainerror and maximum number of iterations with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
// \lambda_{max} = ||A^T b|| makes sense if have (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
// 3) Cholesky of gram: Only compute the cholesky factorization once -- s and info->s in Project stay at kOne=1, and later calls just ensure the GPU already holds the factor (could have been folded into Init together with the Gramian)
// 4) Project: Solve L L^T x = b for x by forward and backward solve (Ly=b for y and then y=L^T x for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
cublasHandle_t hdl;
cublasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
cml::blas_scal(hdl, kZero, &ztemp); // ztemp = ztemp * kZero (avoid dividing by _rho == 0)
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
wrapcudaDeviceSynchronize(); // not needed, as blas's are cuda call and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
// LOOP until satisfy convergence criteria
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / std::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = std::max(proj_tol, kProjTolMax);
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//cudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
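// The residuals are first estimated cheaply from successive iterates
// (nrm_s ~ rho*||z^k - z^{k+1}||, nrm_r ~ ||z^{k+1/2} - z^{k+1}||); the exact primal residual
// ||A*x^{1/2} - y^{1/2}|| (and the matching exact dual residual) is only formed when those
// estimates pass the tolerances or use_exact_stop is set.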
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
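// Recovers the duals as mu = -rho*(xt - xprev + x12) and lambda = -rho*(yt - yprev + y12),
// then undoes the d/e equilibration: lambda *= d, mu /= e, y = y12 / d, x = x12 * e.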
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
// operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
cublasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(cudaSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(cudaSetDevice(_wDev));
if(1){
if (_z)
cudaFree(_z);
if (_zt)
cudaFree(_zt);
if (_xp)
cudaFree(_xp);
if (_trainPredsp)
cudaFree(_trainPredsp);
if (_validPredsp)
cudaFree(_validPredsp);
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
|
53f686681306b484c6b6210b87fb2984b4b4bee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------------------------
// File: cifar10_vgg.cu
// VGG-Net BNN inference source file for CIFAR10.
// ---------------------------------------------------------------------------
// See our arXiv paper for detail: https://arxiv.org/abs/2006.16578
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/TCBNN
// PNNL-IPID: 31925-E, ECCN: EAR99, IR: PNNL-SA-152850
// BSD Lincese.
// Richland, 99352, WA, USA. June-30-2020.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <iostream>
#include <string>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "param.h"
#include "kernel_hip.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
#ifdef NEWFMT
__global__ void vggnet128(
InConv128LayerParam* bconv1,
Conv128LayerParam* bconv2,
Conv128LayerParam* bconv3,
Conv128LayerParam* bconv4,
Conv128LayerParam* bconv5,
Conv128LayerParam* bconv6,
Fc128LayerParam* bfc1,
Fc128LayerParam* bfc2,
Out128LayerParam* bout)
{
//SET_KERNEL_TIMER;
grid_group grid = this_grid();
//========= Conv1 ============
InConv128LayerFMT(bconv1);
grid.sync();
//TICK_KERNEL_TIMER(bconv1);
//========= Conv2 ============
Conv128LayerFMT(bconv2);
grid.sync();
//TICK_KERNEL_TIMER(bconv2);
//========= Conv3 ============
Conv128LayerFMT(bconv3);
grid.sync();
//TICK_KERNEL_TIMER(bconv3);
//========= Conv4 ============
Conv128LayerFMT(bconv4);
grid.sync();
//TICK_KERNEL_TIMER(bconv4);
//========= Conv5 ============
Conv128LayerFMT(bconv5);
grid.sync();
//TICK_KERNEL_TIMER(bconv5);
//========= Conv6 ============
Conv128LayerFMT(bconv6);
grid.sync();
//TICK_KERNEL_TIMER(bconv6);
//========= Fc1 ============
Fc128LayerFMT(bfc1);
grid.sync();
//TICK_KERNEL_TIMER(bfc1);
//========= Fc2 ============
Fc128LayerFMT(bfc2);
grid.sync();
//TICK_KERNEL_TIMER(bfc2);
////========== Output ===========
Out128LayerFMT(bout);
//grid.sync();
//TICK_KERNEL_TIMER(bout);
}
#else
__global__ void vggnet128(
InConv128LayerParam* bconv1,
Conv128LayerParam* bconv2,
Conv128LayerParam* bconv3,
Conv128LayerParam* bconv4,
Conv128LayerParam* bconv5,
Conv128LayerParam* bconv6,
Fc128LayerParam* bfc1,
Fc128LayerParam* bfc2,
Out128LayerParam* bout)
{
grid_group grid = this_grid();
//SET_KERNEL_TIMER;
//========= Conv1 ============
InConv128Layer(bconv1);
grid.sync();
//TICK_KERNEL_TIMER(bconv1);
//========= Conv2 ============
Conv128Layer(bconv2);
grid.sync();
//TICK_KERNEL_TIMER(bconv2);
//========= Conv3 ============
Conv128Layer(bconv3);
grid.sync();
//TICK_KERNEL_TIMER(bconv3);
//========= Conv4 ============
Conv128Layer(bconv4);
grid.sync();
//TICK_KERNEL_TIMER(bconv4);
//========= Conv5 ============
Conv128Layer(bconv5);
grid.sync();
//TICK_KERNEL_TIMER(bconv5);
//========= Conv6 ============
Conv128Layer(bconv6);
grid.sync();
//TICK_KERNEL_TIMER(bconv6);
//========= Fc1 ============
Fc128Layer(bfc1);
grid.sync();
//TICK_KERNEL_TIMER(bfc1);
//========= Fc2 ============
Fc128Layer(bfc2);
grid.sync();
//TICK_KERNEL_TIMER(bfc2);
////========== Output ===========
Out128Layer(bout);
//TICK_KERNEL_TIMER(bout);
}
#endif
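// The whole network runs as one persistent kernel: every layer reads the
// previous layer's output from global memory, so a device-wide grid.sync()
// separates the layers, which in turn requires a cooperative launch in which
// all blocks are co-resident. A minimal sketch of that launch pattern (the
// real call sites, using these same names, are in main() below):
//
//   int blocksPerSm = 0;
//   hipOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, vggnet128,
//                                                numThreads, shared_memory);
//   hipLaunchCooperativeKernel((void*)vggnet128,
//                              blocksPerSm * deviceProp.multiProcessorCount,
//                              numThreads, args, shared_memory);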
int main()
{
int dev = 0;
hipSetDevice(dev);
const unsigned batch = 512;
const unsigned output_size = 10;
const unsigned image_height = 32;
const unsigned image_width = 32;
const unsigned image_channel = 3;
const unsigned filter_height = 3;
const unsigned filter_width = 3;
const unsigned n_hidden = 1024;
//=============== Get Input and Label =================
float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
string cifar10_dir = "/home/lian599/data/cifar10c/test_batch.bin";
read_CIFAR10_normalized(cifar10_dir, images, image_labels, batch);
//================ Get Weight =================
//FILE* config_file = fopen("./cifar10.config","r");
FILE* config_file = fopen("./vgg_cifar10.csv","r");
//================ Set Network =================
//Bconv1 Layer
InConv128LayerParam* bconv1 = new InConv128LayerParam("Conv1", image_height, image_width,
filter_height, filter_width, 3, 128, batch);
InConv128LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
//Bconv2 Layer
Conv128LayerParam* bconv2 = new Conv128LayerParam("Conv2", bconv1->output_height,
bconv1->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
true, 2, 2, false);
Conv128LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
//Bconv3 Layer
Conv128LayerParam* bconv3 = new Conv128LayerParam("Conv3", bconv2->output_height,
bconv2->output_width, filter_height, filter_width, 128, 256, batch);
Conv128LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
//Bconv4 Layer
Conv128LayerParam* bconv4 = new Conv128LayerParam("Conv4", bconv3->output_height,
bconv3->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
true, 2, 2, false);
Conv128LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
//Bconv5 Layer
Conv128LayerParam* bconv5 = new Conv128LayerParam("Conv5", bconv4->output_height,
bconv4->output_width, filter_height, filter_width, 256, 512, batch);
Conv128LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
//Bconv6 Layer
Conv128LayerParam* bconv6 = new Conv128LayerParam("Conv6", bconv5->output_height,
bconv5->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
true, 2, 2, true);
Conv128LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
//Fc1 Layer
Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, (bconv6->output_height)
*(bconv6->output_width)*512, n_hidden);
Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv6->get_output_gpu());
//Fc2 Layer
Fc128LayerParam* bfc2 = new Fc128LayerParam("Fc2", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
//Out Layer
Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());
//================ Setup Kernel =================
int numThreads = 1024;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int numBlocksPerSm;
//int shared_memory = 512*sizeof(int)*32;
int shared_memory = 256*sizeof(int)*32;
hipFuncSetAttribute(vggnet128, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet128, numThreads, shared_memory);
//printf("\n========= blk:%d ==========\n",numBlocksPerSm);
void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
&bfc1_gpu, &bfc2_gpu, &bout_gpu};
START_TIMER;
hipLaunchCooperativeKernel((void*)vggnet128, numBlocksPerSm*deviceProp.multiProcessorCount,
numThreads, args, shared_memory);
STOP_TIMER;
CUDA_CHECK_KERNEL();
/*
float* out = bfc1->download_full_output();
for (int i=65536; i<65536+256; i++)
//for (int i=8192; i<8192+256; i++)
{
printf("%.f ", out[i]);
if ((i+1)%16==0) printf("\n");
}
printf("\n===%f===\n", bout->bn_scale[0]);
*/
/*
float* ss = bconv1->download_full_output();
int a = 0;
int b = 100;
int max_width = 4;
for (int i=a; i<b; i++)
{
printf("%*.0f ",max_width, ss[i]);
if ( (i-a+1)%18 == 0)
printf("\n");
}
printf("\n");
*/
//================ Output =================
float* output = bout->download_output();
//validate_prediction(output, image_labels, output_size, batch);
//for (int i=0; i<256; i++)
//{
//printf("%f ",output[i]);
//if ((i+1)%10==0) printf("\n");
//}
delete bconv1;
delete bconv2;
delete bconv3;
delete bconv4;
delete bconv5;
delete bconv6;
delete bfc1;
delete bfc2;
delete bout;
return 0;
}
| 53f686681306b484c6b6210b87fb2984b4b4bee1.cu | // ---------------------------------------------------------------------------
// File: cifar10_vgg.cu
// VGG-Net BNN inference source file for CIFAR10.
// ---------------------------------------------------------------------------
// See our arXiv paper for detail: https://arxiv.org/abs/2006.16578
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/TCBNN
// PNNL-IPID: 31925-E, ECCN: EAR99, IR: PNNL-SA-152850
// BSD License.
// Richland, 99352, WA, USA. June-30-2020.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <iostream>
#include <string>
#include <cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "param.h"
#include "kernel.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
#ifdef NEWFMT
__global__ void vggnet128(
InConv128LayerParam* bconv1,
Conv128LayerParam* bconv2,
Conv128LayerParam* bconv3,
Conv128LayerParam* bconv4,
Conv128LayerParam* bconv5,
Conv128LayerParam* bconv6,
Fc128LayerParam* bfc1,
Fc128LayerParam* bfc2,
Out128LayerParam* bout)
{
//SET_KERNEL_TIMER;
grid_group grid = this_grid();
//========= Conv1 ============
InConv128LayerFMT(bconv1);
grid.sync();
//TICK_KERNEL_TIMER(bconv1);
//========= Conv2 ============
Conv128LayerFMT(bconv2);
grid.sync();
//TICK_KERNEL_TIMER(bconv2);
//========= Conv3 ============
Conv128LayerFMT(bconv3);
grid.sync();
//TICK_KERNEL_TIMER(bconv3);
//========= Conv4 ============
Conv128LayerFMT(bconv4);
grid.sync();
//TICK_KERNEL_TIMER(bconv4);
//========= Conv5 ============
Conv128LayerFMT(bconv5);
grid.sync();
//TICK_KERNEL_TIMER(bconv5);
//========= Conv6 ============
Conv128LayerFMT(bconv6);
grid.sync();
//TICK_KERNEL_TIMER(bconv6);
//========= Fc1 ============
Fc128LayerFMT(bfc1);
grid.sync();
//TICK_KERNEL_TIMER(bfc1);
//========= Fc2 ============
Fc128LayerFMT(bfc2);
grid.sync();
//TICK_KERNEL_TIMER(bfc2);
////========== Output ===========
Out128LayerFMT(bout);
//grid.sync();
//TICK_KERNEL_TIMER(bout);
}
#else
__global__ void vggnet128(
InConv128LayerParam* bconv1,
Conv128LayerParam* bconv2,
Conv128LayerParam* bconv3,
Conv128LayerParam* bconv4,
Conv128LayerParam* bconv5,
Conv128LayerParam* bconv6,
Fc128LayerParam* bfc1,
Fc128LayerParam* bfc2,
Out128LayerParam* bout)
{
grid_group grid = this_grid();
//SET_KERNEL_TIMER;
//========= Conv1 ============
InConv128Layer(bconv1);
grid.sync();
//TICK_KERNEL_TIMER(bconv1);
//========= Conv2 ============
Conv128Layer(bconv2);
grid.sync();
//TICK_KERNEL_TIMER(bconv2);
//========= Conv3 ============
Conv128Layer(bconv3);
grid.sync();
//TICK_KERNEL_TIMER(bconv3);
//========= Conv4 ============
Conv128Layer(bconv4);
grid.sync();
//TICK_KERNEL_TIMER(bconv4);
//========= Conv5 ============
Conv128Layer(bconv5);
grid.sync();
//TICK_KERNEL_TIMER(bconv5);
//========= Conv6 ============
Conv128Layer(bconv6);
grid.sync();
//TICK_KERNEL_TIMER(bconv6);
//========= Fc1 ============
Fc128Layer(bfc1);
grid.sync();
//TICK_KERNEL_TIMER(bfc1);
//========= Fc2 ============
Fc128Layer(bfc2);
grid.sync();
//TICK_KERNEL_TIMER(bfc2);
////========== Output ===========
Out128Layer(bout);
//TICK_KERNEL_TIMER(bout);
}
#endif
int main()
{
int dev = 0;
cudaSetDevice(dev);
const unsigned batch = 512;
const unsigned output_size = 10;
const unsigned image_height = 32;
const unsigned image_width = 32;
const unsigned image_channel = 3;
const unsigned filter_height = 3;
const unsigned filter_width = 3;
const unsigned n_hidden = 1024;
//=============== Get Input and Label =================
float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
string cifar10_dir = "/home/lian599/data/cifar10c/test_batch.bin";
read_CIFAR10_normalized(cifar10_dir, images, image_labels, batch);
//================ Get Weight =================
//FILE* config_file = fopen("./cifar10.config","r");
FILE* config_file = fopen("./vgg_cifar10.csv","r");
//================ Set Network =================
//Bconv1 Layer
InConv128LayerParam* bconv1 = new InConv128LayerParam("Conv1", image_height, image_width,
filter_height, filter_width, 3, 128, batch);
InConv128LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
//Bconv2 Layer
Conv128LayerParam* bconv2 = new Conv128LayerParam("Conv2", bconv1->output_height,
bconv1->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
true, 2, 2, false);
Conv128LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
//Bconv3 Layer
Conv128LayerParam* bconv3 = new Conv128LayerParam("Conv3", bconv2->output_height,
bconv2->output_width, filter_height, filter_width, 128, 256, batch);
Conv128LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
//Bconv4 Layer
Conv128LayerParam* bconv4 = new Conv128LayerParam("Conv4", bconv3->output_height,
bconv3->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
true, 2, 2, false);
Conv128LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
//Bconv5 Layer
Conv128LayerParam* bconv5 = new Conv128LayerParam("Conv5", bconv4->output_height,
bconv4->output_width, filter_height, filter_width, 256, 512, batch);
Conv128LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
//Bconv6 Layer
Conv128LayerParam* bconv6 = new Conv128LayerParam("Conv6", bconv5->output_height,
bconv5->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
true, 2, 2, true);
Conv128LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
//Fc1 Layer
Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, (bconv6->output_height)
*(bconv6->output_width)*512, n_hidden);
Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv6->get_output_gpu());
//Fc2 Layer
Fc128LayerParam* bfc2 = new Fc128LayerParam("Fc2", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
//Out Layer
Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());
//================ Setup Kernel =================
int numThreads = 1024;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int numBlocksPerSm;
//int shared_memory = 512*sizeof(int)*32;
int shared_memory = 256*sizeof(int)*32;
cudaFuncSetAttribute(vggnet128, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet128, numThreads, shared_memory);
//printf("\n========= blk:%d ==========\n",numBlocksPerSm);
void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
&bfc1_gpu, &bfc2_gpu, &bout_gpu};
START_TIMER;
cudaLaunchCooperativeKernel((void*)vggnet128, numBlocksPerSm*deviceProp.multiProcessorCount,
numThreads, args, shared_memory);
STOP_TIMER;
CUDA_CHECK_KERNEL();
/*
float* out = bfc1->download_full_output();
for (int i=65536; i<65536+256; i++)
//for (int i=8192; i<8192+256; i++)
{
printf("%.f ", out[i]);
if ((i+1)%16==0) printf("\n");
}
printf("\n===%f===\n", bout->bn_scale[0]);
*/
/*
float* ss = bconv1->download_full_output();
int a = 0;
int b = 100;
int max_width = 4;
for (int i=a; i<b; i++)
{
printf("%*.0f ",max_width, ss[i]);
if ( (i-a+1)%18 == 0)
printf("\n");
}
printf("\n");
*/
//================ Output =================
float* output = bout->download_output();
//validate_prediction(output, image_labels, output_size, batch);
//for (int i=0; i<256; i++)
//{
//printf("%f ",output[i]);
//if ((i+1)%10==0) printf("\n");
//}
delete bconv1;
delete bconv2;
delete bconv3;
delete bconv4;
delete bconv5;
delete bconv6;
delete bfc1;
delete bfc2;
delete bout;
return 0;
}
|
ad627b142ded9d5a1cdad7eebe2e104e8229752f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
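// Index decomposition example for the kernel above (illustrative numbers):
// with padded_height = 4 and padded_width = 6, linear index 50 decodes as
//   pw = 50 % 6 = 2,  50 / 6 = 8,  ph = 8 % 4 = 0,  nc = 8 / 4 = 2,
// i.e. the thread writes row 0, column 2 of padded feature map nc = 2
// (where nc enumerates the n*channels maps in order).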
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
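// Worked example of the reflect mapping above (illustrative numbers): with
// height = 5 and pad_t = 2, padded rows ph = 0..8 give
//   h = ph - pad_t              : -2 -1  0  1  2  3  4  5  6
//   h = max(h, -h)              :  2  1  0  1  2  3  4  5  6
//   h = min(h, 2*height - h - 2):  2  1  0  1  2  3  4  3  2
// so rows reflect about the first and last row without duplicating the edge
// row (the BORDER_REFLECT_101 convention).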
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
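// The gradient kernels for reflect and edge padding iterate over the padded
// output and accumulate with atomicAdd because several padded positions can
// map to the same input element (in the reflect example above, input row 2
// collects the gradients of padded rows 0, 4 and 8); plain stores would race.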
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
| ad627b142ded9d5a1cdad7eebe2e104e8229752f.cu | #include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
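// NHWC addressing example for the kernels above (illustrative numbers): with
// height = width = 3 and channels = 4, element (n=1, h=2, w=0, c=3) lives at
// offset ((1*3 + 2)*3 + 0)*4 + 3 = 63, i.e. the channel index varies fastest,
// which is why each thread recovers c as index % channels before decoding the
// spatial coordinates.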
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
11746445f7fcc0b8cd248bd634e651ece6cf9b3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/cuda.hpp"
#ifdef LBANN_HAS_GPU
namespace lbann {
namespace cuda {
// -------------------------------------------------------------
// Utilities for CUDA events
// -------------------------------------------------------------
event_wrapper::event_wrapper() : m_event(nullptr), m_stream(0) {
CHECK_CUDA(hipEventCreateWithFlags(&m_event, hipEventDisableTiming));
}
event_wrapper::event_wrapper(const event_wrapper& other)
: m_event(nullptr), m_stream(other.m_stream) {
CHECK_CUDA(hipEventCreateWithFlags(&m_event, hipEventDisableTiming));
if (!other.query()) { record(m_stream); }
}
event_wrapper& event_wrapper::operator=(const event_wrapper& other) {
m_stream = other.m_stream;
if (!other.query()) { record(m_stream); }
return *this;
}
event_wrapper::~event_wrapper() {
hipEventDestroy(m_event);
}
void event_wrapper::record(hipStream_t stream) {
m_stream = stream;
CHECK_CUDA(hipEventRecord(m_event, m_stream));
}
bool event_wrapper::query() const {
const auto& status = hipEventQuery(m_event);
switch (status) {
case hipSuccess: return true;
case hipErrorNotReady: return false;
default:
CHECK_CUDA(status);
return false;
}
}
void event_wrapper::synchronize() {
CHECK_CUDA(hipEventSynchronize(m_event));
}
hipEvent_t& event_wrapper::get_event() { return m_event; }
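// Usage sketch for the wrapper above (a minimal illustration; the stream and
// the host-side work are assumptions, not taken from this file):
//
//   lbann::cuda::event_wrapper ev;
//   ev.record(stream);              // capture all work queued on `stream`
//   while (!ev.query()) { /* overlap host-side work with the GPU */ }
//   ev.synchronize();               // or block until the event is reached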
// -------------------------------------------------------------
// Helper functions for tensor operations
// -------------------------------------------------------------
namespace {
using int4 = cuda::array<int, 4>;
/**
* Block dimensions: bdimx x bdimy x bdimz
*
 * Grid dimensions: (dim[3] / bdimx) x (dim[2] / bdimy) x (dim[1] / bdimz)
*/
template <typename TensorDataType>
__global__ void copy_4d_kernel(
int4 dims,
const TensorDataType* __restrict__ input,
int4 input_strides,
TensorDataType* __restrict__ output,
int4 output_strides) {
// Indices
const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y;
const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z;
const auto& nthreadsx = gridDim.x * blockDim.x;
const auto& nthreadsy = gridDim.y * blockDim.y;
const auto& nthreadsz = gridDim.z * blockDim.z;
for (int i0=0; i0<dims[0]; ++i0) {
for (int i1=gidz; i1<dims[1]; i1+=nthreadsz) {
for (int i2=gidy; i2<dims[2]; i2+=nthreadsy) {
for (int i3=gidx; i3<dims[3]; i3+=nthreadsx) {
const auto& x = input[i0 * input_strides[0]
+ i1 * input_strides[1]
+ i2 * input_strides[2]
+ i3 * input_strides[3]];
auto& y = output[i0 * output_strides[0]
+ i1 * output_strides[1]
+ i2 * output_strides[2]
+ i3 * output_strides[3]];
y = x;
}
}
}
}
}
} // namespace <anon>
template <typename TensorDataType>
void copy_tensor(
hipStream_t stream,
const std::vector<size_t>& dims,
const TensorDataType* input,
const std::vector<size_t>& input_strides,
TensorDataType* output,
const std::vector<size_t>& output_strides) {
// Check inputs
if (dims.empty() || dims.size() > 4) {
LBANN_ERROR("invalid number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != input_strides.size()) {
LBANN_ERROR(
"number of input strides (",input_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != output_strides.size()) {
LBANN_ERROR(
"number of output strides (",output_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
// Pad tensor dimensions to 4D
std::vector<int>
rdims(dims.rbegin(), dims.rend()),
input_rstrides(input_strides.rbegin(), input_strides.rend()),
output_rstrides(output_strides.rbegin(), output_strides.rend());
rdims.resize(4, 1);
input_rstrides.resize(4, input_rstrides.back());
output_rstrides.resize(4, output_rstrides.back());
// Launch CUDA kernel
const auto size = std::accumulate(
dims.begin(), dims.end(), 1, std::multiplies<int>());
if (size > 0) {
constexpr size_t block_size = 64;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
block_dims.z = 1;
grid_dims.x = (rdims[0] + block_dims.x - 1) / block_dims.x;
grid_dims.y = (rdims[1] + block_dims.y - 1) / block_dims.y;
grid_dims.z = (rdims[2] + block_dims.z - 1) / block_dims.z;
grid_dims.y = El::Min(grid_dims.y, 65535);
grid_dims.z = El::Min(grid_dims.z, 65535);
hipLaunchKernelGGL(( copy_4d_kernel), dim3(grid_dims), dim3(block_dims), 0, stream,
{rdims[3], rdims[2], rdims[1], rdims[0]},
input,
{input_rstrides[3], input_rstrides[2],
input_rstrides[1], input_rstrides[0]},
output,
{output_rstrides[3], output_rstrides[2],
output_rstrides[1], output_rstrides[0]});
}
}
#if defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
template <>
void copy_tensor<cpu_fp16>(
hipStream_t stream,
const std::vector<size_t>& dims,
const cpu_fp16* input,
const std::vector<size_t>& input_strides,
cpu_fp16* output,
const std::vector<size_t>& output_strides) {
copy_tensor<fp16>(
stream,
dims,
reinterpret_cast<const fp16*>(input),
input_strides,
reinterpret_cast<fp16*>(output),
output_strides);
}
#endif // defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
// Explicit template instantiation
#define PROTO(T) \
template void copy_tensor<T>( \
hipStream_t stream, \
const std::vector<size_t>& dims, \
const T* input, \
const std::vector<size_t>& input_strides, \
T* output, \
const std::vector<size_t>& output_strides);
#define LBANN_INSTANTIATE_GPU_HALF
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
#undef PROTO
} // namespace cuda
} // namespace lbann
#endif // LBANN_HAS_GPU
| 11746445f7fcc0b8cd248bd634e651ece6cf9b3a.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/cuda.hpp"
#ifdef LBANN_HAS_GPU
namespace lbann {
namespace cuda {
// -------------------------------------------------------------
// Utilities for CUDA events
// -------------------------------------------------------------
event_wrapper::event_wrapper() : m_event(nullptr), m_stream(0) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
}
event_wrapper::event_wrapper(const event_wrapper& other)
: m_event(nullptr), m_stream(other.m_stream) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
if (!other.query()) { record(m_stream); }
}
event_wrapper& event_wrapper::operator=(const event_wrapper& other) {
m_stream = other.m_stream;
if (!other.query()) { record(m_stream); }
return *this;
}
event_wrapper::~event_wrapper() {
cudaEventDestroy(m_event);
}
void event_wrapper::record(cudaStream_t stream) {
m_stream = stream;
CHECK_CUDA(cudaEventRecord(m_event, m_stream));
}
bool event_wrapper::query() const {
const auto& status = cudaEventQuery(m_event);
switch (status) {
case cudaSuccess: return true;
case cudaErrorNotReady: return false;
default:
CHECK_CUDA(status);
return false;
}
}
void event_wrapper::synchronize() {
CHECK_CUDA(cudaEventSynchronize(m_event));
}
cudaEvent_t& event_wrapper::get_event() { return m_event; }
// -------------------------------------------------------------
// Helper functions for tensor operations
// -------------------------------------------------------------
namespace {
using int4 = cuda::array<int, 4>;
/**
* Block dimensions: bdimx x bdimy x bdimz
*
 * Grid dimensions: (dim[3] / bdimx) x (dim[2] / bdimy) x (dim[1] / bdimz)
*/
template <typename TensorDataType>
__global__ void copy_4d_kernel(
int4 dims,
const TensorDataType* __restrict__ input,
int4 input_strides,
TensorDataType* __restrict__ output,
int4 output_strides) {
// Indices
const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y;
const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z;
const auto& nthreadsx = gridDim.x * blockDim.x;
const auto& nthreadsy = gridDim.y * blockDim.y;
const auto& nthreadsz = gridDim.z * blockDim.z;
for (int i0=0; i0<dims[0]; ++i0) {
for (int i1=gidz; i1<dims[1]; i1+=nthreadsz) {
for (int i2=gidy; i2<dims[2]; i2+=nthreadsy) {
for (int i3=gidx; i3<dims[3]; i3+=nthreadsx) {
const auto& x = input[i0 * input_strides[0]
+ i1 * input_strides[1]
+ i2 * input_strides[2]
+ i3 * input_strides[3]];
auto& y = output[i0 * output_strides[0]
+ i1 * output_strides[1]
+ i2 * output_strides[2]
+ i3 * output_strides[3]];
y = x;
}
}
}
}
}
} // namespace <anon>
template <typename TensorDataType>
void copy_tensor(
cudaStream_t stream,
const std::vector<size_t>& dims,
const TensorDataType* input,
const std::vector<size_t>& input_strides,
TensorDataType* output,
const std::vector<size_t>& output_strides) {
// Check inputs
if (dims.empty() || dims.size() > 4) {
LBANN_ERROR("invalid number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != input_strides.size()) {
LBANN_ERROR(
"number of input strides (",input_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != output_strides.size()) {
LBANN_ERROR(
"number of output strides (",output_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
// Pad tensor dimensions to 4D
std::vector<int>
rdims(dims.rbegin(), dims.rend()),
input_rstrides(input_strides.rbegin(), input_strides.rend()),
output_rstrides(output_strides.rbegin(), output_strides.rend());
rdims.resize(4, 1);
input_rstrides.resize(4, input_rstrides.back());
output_rstrides.resize(4, output_rstrides.back());
// Launch CUDA kernel
const auto size = std::accumulate(
dims.begin(), dims.end(), 1, std::multiplies<int>());
if (size > 0) {
constexpr size_t block_size = 64;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
block_dims.z = 1;
grid_dims.x = (rdims[0] + block_dims.x - 1) / block_dims.x;
grid_dims.y = (rdims[1] + block_dims.y - 1) / block_dims.y;
grid_dims.z = (rdims[2] + block_dims.z - 1) / block_dims.z;
grid_dims.y = El::Min(grid_dims.y, 65535);
grid_dims.z = El::Min(grid_dims.z, 65535);
copy_4d_kernel<<<grid_dims, block_dims, 0, stream>>>(
{rdims[3], rdims[2], rdims[1], rdims[0]},
input,
{input_rstrides[3], input_rstrides[2],
input_rstrides[1], input_rstrides[0]},
output,
{output_rstrides[3], output_rstrides[2],
output_rstrides[1], output_rstrides[0]});
}
}
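// Usage sketch for copy_tensor above (a minimal illustration; the pointers,
// stream and sizes are assumptions): copy a row-major rows x cols matrix into
// a column-major buffer by giving each side its own strides.
//
//   std::vector<size_t> dims = {rows, cols};
//   lbann::cuda::copy_tensor<float>(stream, dims,
//                                   src, {cols, 1},   // row-major input
//                                   dst, {1, rows});  // column-major output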
#if defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
template <>
void copy_tensor<cpu_fp16>(
cudaStream_t stream,
const std::vector<size_t>& dims,
const cpu_fp16* input,
const std::vector<size_t>& input_strides,
cpu_fp16* output,
const std::vector<size_t>& output_strides) {
copy_tensor<fp16>(
stream,
dims,
reinterpret_cast<const fp16*>(input),
input_strides,
reinterpret_cast<fp16*>(output),
output_strides);
}
#endif // defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
// Explicit template instantiation
#define PROTO(T) \
template void copy_tensor<T>( \
cudaStream_t stream, \
const std::vector<size_t>& dims, \
const T* input, \
const std::vector<size_t>& input_strides, \
T* output, \
const std::vector<size_t>& output_strides);
#define LBANN_INSTANTIATE_GPU_HALF
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
#undef PROTO
} // namespace cuda
} // namespace lbann
#endif // LBANN_HAS_GPU
|
0ad57e7240bcc246ba280ce46f4fd3fac6c0951d.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHBlas.h"
#include "THHGeneral.h"
#include "THHHalf.h"
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Sdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
double result;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Ddot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
#ifdef CUDA_HALF_TENSOR
half THCudaBlas_Hdot(THCState *state, int64_t n, half *x, int64_t incx, half *y, int64_t incy)
{
#if TORCH_HIP_VERSION >= 8000
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
half result;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDotEx_v2(handle, n,
x, HIP_R_16F, incx,
y, HIP_R_16F, incy,
&result, HIP_R_16F,
HIP_R_32F));
return result;
}
THError("Cublas_Hdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return THC_float2half(0);
#else
THError("Cublas_Hdot requires CUDA 8.0+");
return THC_half2float(0);
#endif
}
#endif
/* Level 2 */
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
if(n == 1)
lda = m;
hipblasOperation_t op;
if (trans == 't') op = HIPBLAS_OP_T;
else if (trans == 'n') op = HIPBLAS_OP_N;
else if (trans == 'c') op = HIPBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Sgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
if(n == 1)
lda = m;
hipblasOperation_t op;
if (trans == 't') op = HIPBLAS_OP_T;
else if (trans == 'n') op = HIPBLAS_OP_N;
else if (trans == 'c') op = HIPBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Dgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
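/* BLAS rejects leading dimensions smaller than the corresponding matrix
   extent, so when a dimension degenerates to 1 (vector-shaped operands) the
   caller's lda/ldb/ldc may be invalid; patch them up before handing the call
   to the BLAS backend. */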
void adjustLd(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transa_)
{
if(m == 1)
*lda = k;
}
else
{
if(k == 1)
*lda = m;
}
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef CUDA_HALF_TENSOR
// In CUDA 8.0, definition of data types for sgemmex changed
#if TORCH_HIP_VERSION < 8000
# define HIP_R_16F HIPBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, half *a, int64_t lda, half *b, int64_t ldb, half beta, half *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
// Simulated Hgemm
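    // Inputs and outputs stay fp16 in memory; the accumulation runs in fp32
    // through the *gemmEx paths below.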
float fAlpha = THC_half2float(alpha);
float fBeta = THC_half2float(beta);
#if TORCH_HIP_VERSION < 9000
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, HIP_R_16F, i_lda, b, HIP_R_16F,
i_ldb, &fBeta, c, HIP_R_16F, i_ldc));
#else
hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state);
if (prop->major >= 5){
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
THCublasCheck(hipblasGemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, HIP_R_16F, i_lda, b, HIP_R_16F,
i_ldb, &fBeta, c, HIP_R_16F, i_ldc,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}else{
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, HIP_R_16F, i_lda, b, HIP_R_16F,
i_ldb, &fBeta, c, HIP_R_16F, i_ldc));
}
#endif
return;
}
THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc"
"with th bound [val] <= %d", INT_MAX);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if TORCH_HIP_VERSION >= 8000
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if TORCH_HIP_VERSION >= 8000
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
/* Inverse */
void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
THC_API void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
THC_API void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
hipblasHandle_t handle = THCState_getCurrentBlasHandle(state);
hipblasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(hipblasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
| 0ad57e7240bcc246ba280ce46f4fd3fac6c0951d.cu | #include "THCBlas.h"
#include "THCGeneral.h"
#include "THCHalf.h"
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Sdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
double result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Ddot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
#ifdef CUDA_HALF_TENSOR
half THCudaBlas_Hdot(THCState *state, int64_t n, half *x, int64_t incx, half *y, int64_t incy)
{
#if CUDA_VERSION >= 8000
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
half result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDotEx(handle, n,
x, CUDA_R_16F, incx,
y, CUDA_R_16F, incy,
&result, CUDA_R_16F,
CUDA_R_32F));
return result;
}
THError("Cublas_Hdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return THC_float2half(0);
#else
THError("Cublas_Hdot requires CUDA 8.0+");
return THC_half2float(0);
#endif
}
#endif
/* Level 2 */
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
if(n == 1)
lda = m;
cublasOperation_t op;
if (trans == 't') op = CUBLAS_OP_T;
else if (trans == 'n') op = CUBLAS_OP_N;
else if (trans == 'c') op = CUBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Sgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
if(n == 1)
lda = m;
cublasOperation_t op;
if (trans == 't') op = CUBLAS_OP_T;
else if (trans == 'n') op = CUBLAS_OP_N;
else if (trans == 'c') op = CUBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Dgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLd(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transa_)
{
if(m == 1)
*lda = k;
}
else
{
if(k == 1)
*lda = m;
}
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef CUDA_HALF_TENSOR
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, half *a, int64_t lda, half *b, int64_t ldb, half beta, half *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
// Simulated Hgemm
float fAlpha = THC_half2float(alpha);
float fBeta = THC_half2float(beta);
#if CUDA_VERSION < 9000
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
#else
cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state);
if (prop->major >= 5){
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
THCublasCheck(cublasGemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}else{
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
}
#endif
return;
}
THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc"
"with th bound [val] <= %d", INT_MAX);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if CUDA_VERSION >= 8000
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if CUDA_VERSION >= 8000
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
/* Inverse */
void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
THC_API void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
THC_API void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
|
a6a365b213fa65532a699bb59a5fe9d4aee2183a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
texture<float4, 1, hipReadModeElementType> ga_tex_image;
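// Downsample by 2x: each output pixel is the 5x5 binomial-weighted
// (1 4 6 4 1 in each direction, weights summing to 256) average of the
// corresponding neighborhood in the full-resolution source texture.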
__global__ void gaussDown(float4 *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
int w = 2*width;
float4 p1 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -2) * w );
float4 p2 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -2) * w );
float4 p3 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -2) * w );
float4 p4 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -2) * w );
float4 p5 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -2) * w );
float4 p6 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -1) * w );
float4 p7 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -1) * w );
float4 p8 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -1) * w );
float4 p9 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -1) * w );
float4 p10 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -1) * w );
float4 p11 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -0) * w );
float4 p12 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -0) * w );
float4 p13 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -0) * w );
float4 p14 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -0) * w );
float4 p15 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -0) * w );
float4 p16 = tex1Dfetch(ga_tex_image, tx - 2 + (ty +1) * w );
float4 p17 = tex1Dfetch(ga_tex_image, tx - 1 + (ty +1) * w );
float4 p18 = tex1Dfetch(ga_tex_image, tx - 0 + (ty +1) * w );
float4 p19 = tex1Dfetch(ga_tex_image, tx + 1 + (ty +1) * w );
float4 p20 = tex1Dfetch(ga_tex_image, tx + 2 + (ty +1) * w );
float4 p21 = tex1Dfetch(ga_tex_image, tx - 2 + (ty +2) * w );
float4 p22 = tex1Dfetch(ga_tex_image, tx - 1 + (ty +2) * w );
float4 p23 = tex1Dfetch(ga_tex_image, tx - 0 + (ty +2) * w );
float4 p24 = tex1Dfetch(ga_tex_image, tx + 1 + (ty +2) * w );
float4 p25 = tex1Dfetch(ga_tex_image, tx + 2 + (ty +2) * w );
/* float v_x = (1 * (p1.x + 4*p2.x + 6*p3.x + 4*p4.x + p5.x)
+ 4 * (p6.x + 4*p7.x + 6*p8.x + 4*p9.x + p10.x)
+ 6 * (p11.x + 4*p12.x + 6*p13.x + 4*p14.x + p15.x)
+ 4 * (p16.x + 4*p17.x + 6*p18.x + 4*p19.x + p20.x)
+ 1 * (p21.x + 4*p22.x + 6*p23.x + 4*p24.x + p25.x)) / 256;
float v_y = (1 * (p1.y + 4*p2.y + 6*p3.y + 4*p4.y + p5.y)
+ 4 * (p6.y + 4*p7.y + 6*p8.y + 4*p9.y + p10.y)
+ 6 * (p11.y + 4*p12.y + 6*p13.y + 4*p14.y + p15.y)
+ 4 * (p16.y + 4*p17.y + 6*p18.y + 4*p19.y + p20.y)
+ 1 * (p21.y + 4*p22.y + 6*p23.y + 4*p24.y + p25.y)) / 256;
float v_z = (1 * (p1.z + 4*p2.z + 6*p3.z + 4*p4.z + p5.z)
+ 4 * (p6.z + 4*p7.z + 6*p8.z + 4*p9.z + p10.z)
+ 6 * (p11.z + 4*p12.z + 6*p13.z + 4*p14.z + p15.z)
+ 4 * (p16.z + 4*p17.z + 6*p18.z + 4*p19.z + p20.z)
+ 1 * (p21.z + 4*p22.z + 6*p23.z + 4*p24.z + p25.z)) / 256;
*/
float4 v = (1 * (p1 + 4*p2 + 6*p3 + 4*p4 + p5)
+ 4 * (p6 + 4*p7 + 6*p8 + 4*p9 + p10)
+ 6 * (p11 + 4*p12 + 6*p13 + 4*p14 + p15)
+ 4 * (p16 + 4*p17 + 6*p18 + 4*p19 + p20)
+ 1 * (p21 + 4*p22 + 6*p23 + 4*p24 + p25)) / 256;
// target[width * iy + ix] = make_color(v_x, v_y, v_z, 0);
target[width * iy + ix] = v;
}
}
// 1D texture, direct 1D-float
texture<float, 1, hipReadModeElementType> ga_tex_image_f;
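// Single-channel variant of gaussDown: the same 5x5 binomial downsampling,
// but operating on a plain float image instead of float4.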
__global__ void gaussDown_f(float *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
int w = 2*width;
float p1 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -2) * w );
float p2 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -2) * w );
float p3 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -2) * w );
float p4 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -2) * w );
float p5 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -2) * w );
float p6 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -1) * w );
float p7 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -1) * w );
float p8 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -1) * w );
float p9 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -1) * w );
float p10 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -1) * w );
float p11 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -0) * w );
float p12 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -0) * w );
float p13 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -0) * w );
float p14 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -0) * w );
float p15 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -0) * w );
float p16 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty +1) * w );
float p17 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty +1) * w );
float p18 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty +1) * w );
float p19 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty +1) * w );
float p20 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty +1) * w );
float p21 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty +2) * w );
float p22 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty +2) * w );
float p23 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty +2) * w );
float p24 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty +2) * w );
float p25 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty +2) * w );
float v_x = (1 * (p1 + 4*p2 + 6*p3 + 4*p4 + p5)
+ 4 * (p6 + 4*p7 + 6*p8 + 4*p9 + p10)
+ 6 * (p11 + 4*p12 + 6*p13 + 4*p14 + p15)
+ 4 * (p16 + 4*p17 + 6*p18 + 4*p19 + p20)
+ 1 * (p21 + 4*p22 + 6*p23 + 4*p24 + p25)) /256 ;
target[width * iy + ix] = v_x;
}
}
/*
* src image pixels: a b c dst image pixels: A B C D E
* F G H I J
* d e f K L M N O
* P Q R S T
* g h i U V W X Y
* M = 1 * (a + 6b + c)
* + 6 * (d + 6e + f)
* + 1 * (g + 6h + i)
*
* N = 1 * (4b + 4c)
* + 6 * (4e + 4f)
* + 1 * (4h + 4i)
*
* R = 4 * (d + 6e + f)
* + 4 * (g + 6h + i)
*
* S = 4 * (4e + 4f)
* + 4 * (4h + 4i)
*/
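/* Equivalently: the aligned output pixel (M) uses the separable (1 6 1)x(1 6 1)
 * stencil, the half-pixel-shifted outputs (N, R) swap one axis for the (4 4)
 * stencil, and the diagonally shifted output (S) uses (4 4)x(4 4). Every
 * stencil sums to 64, which is why the four results below are divided by 64. */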
__global__ void gaussUp(float4 *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
float4 p1 = tex1Dfetch(ga_tex_image, ix-1 + (iy-1) * width);
float4 p2 = tex1Dfetch(ga_tex_image, ix + (iy-1) * width);
float4 p3 = tex1Dfetch(ga_tex_image, ix+1 + (iy-1) * width);
float4 p4 = tex1Dfetch(ga_tex_image, ix-1 + iy * width);
float4 p5 = tex1Dfetch(ga_tex_image, ix + iy * width);
float4 p6 = tex1Dfetch(ga_tex_image, ix+1 + iy * width);
float4 p7 = tex1Dfetch(ga_tex_image, ix-1 + (iy+1) * width);
float4 p8 = tex1Dfetch(ga_tex_image, ix + (iy+1) * width);
float4 p9 = tex1Dfetch(ga_tex_image, ix+1 + (iy+1) * width);
if (iy == (height-1)) {
p7 = p5;
p8 = p5;
p9 = p5;
}
if (ix == (width-1)) {
p3 = p5;
p6 = p5;
p9 = p5;
}
float4 t1 = 1 * (p1 + 6*p2 + p3) +
6 * (p4 + 6*p5 + p6) +
1 * (p7 + 6*p8 + p9);
float4 t2 = 1 * (4*p2 + 4*p3) +
6 * (4*p5 + 4*p6) +
1 * (4*p8 + 4*p9);
float4 t3 = 4 * (p4 + 6*p5 + p6) +
4 * (p7 + 6*p8 + p9);
float4 t4 = 4 * (4*p5 + 4*p6) +
4 * (4*p8 + 4*p9);
float4 n1 = t1 / 64;
float4 n2 = t2 / 64;
float4 n3 = t3 / 64;
float4 n4 = t4 / 64;
/*
target[2*width * (ty) + tx] = make_color(n1.x, n1.y, n1.z, 0);
target[2*width * (ty) + (ix * 2 + 1)] = make_color(n2.x, n2.y, n2.z, 0);
target[2*width * (ty + 1) + tx] = make_color(n3.x, n3.y, n3.z, 0);
target[2*width * (ty + 1) + (ix * 2 + 1)] = make_color(n4.x, n4.y, n4.z, 0);
*/
target[2*width * (ty) + tx] = n1;
target[2*width * (ty) + (ix * 2 + 1)] = n2;
target[2*width * (ty + 1) + tx] = n3;
target[2*width * (ty + 1) + (ix * 2 + 1)] = n4;
}
}
| a6a365b213fa65532a699bb59a5fe9d4aee2183a.cu |
texture<float4, 1, cudaReadModeElementType> ga_tex_image;
__global__ void gaussDown(float4 *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
int w = 2*width;
float4 p1 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -2) * w );
float4 p2 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -2) * w );
float4 p3 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -2) * w );
float4 p4 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -2) * w );
float4 p5 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -2) * w );
float4 p6 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -1) * w );
float4 p7 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -1) * w );
float4 p8 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -1) * w );
float4 p9 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -1) * w );
float4 p10 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -1) * w );
float4 p11 = tex1Dfetch(ga_tex_image, tx - 2 + (ty -0) * w );
float4 p12 = tex1Dfetch(ga_tex_image, tx - 1 + (ty -0) * w );
float4 p13 = tex1Dfetch(ga_tex_image, tx - 0 + (ty -0) * w );
float4 p14 = tex1Dfetch(ga_tex_image, tx + 1 + (ty -0) * w );
float4 p15 = tex1Dfetch(ga_tex_image, tx + 2 + (ty -0) * w );
float4 p16 = tex1Dfetch(ga_tex_image, tx - 2 + (ty +1) * w );
float4 p17 = tex1Dfetch(ga_tex_image, tx - 1 + (ty +1) * w );
float4 p18 = tex1Dfetch(ga_tex_image, tx - 0 + (ty +1) * w );
float4 p19 = tex1Dfetch(ga_tex_image, tx + 1 + (ty +1) * w );
float4 p20 = tex1Dfetch(ga_tex_image, tx + 2 + (ty +1) * w );
float4 p21 = tex1Dfetch(ga_tex_image, tx - 2 + (ty +2) * w );
float4 p22 = tex1Dfetch(ga_tex_image, tx - 1 + (ty +2) * w );
float4 p23 = tex1Dfetch(ga_tex_image, tx - 0 + (ty +2) * w );
float4 p24 = tex1Dfetch(ga_tex_image, tx + 1 + (ty +2) * w );
float4 p25 = tex1Dfetch(ga_tex_image, tx + 2 + (ty +2) * w );
/* float v_x = (1 * (p1.x + 4*p2.x + 6*p3.x + 4*p4.x + p5.x)
+ 4 * (p6.x + 4*p7.x + 6*p8.x + 4*p9.x + p10.x)
+ 6 * (p11.x + 4*p12.x + 6*p13.x + 4*p14.x + p15.x)
+ 4 * (p16.x + 4*p17.x + 6*p18.x + 4*p19.x + p20.x)
+ 1 * (p21.x + 4*p22.x + 6*p23.x + 4*p24.x + p25.x)) / 256;
float v_y = (1 * (p1.y + 4*p2.y + 6*p3.y + 4*p4.y + p5.y)
+ 4 * (p6.y + 4*p7.y + 6*p8.y + 4*p9.y + p10.y)
+ 6 * (p11.y + 4*p12.y + 6*p13.y + 4*p14.y + p15.y)
+ 4 * (p16.y + 4*p17.y + 6*p18.y + 4*p19.y + p20.y)
+ 1 * (p21.y + 4*p22.y + 6*p23.y + 4*p24.y + p25.y)) / 256;
float v_z = (1 * (p1.z + 4*p2.z + 6*p3.z + 4*p4.z + p5.z)
+ 4 * (p6.z + 4*p7.z + 6*p8.z + 4*p9.z + p10.z)
+ 6 * (p11.z + 4*p12.z + 6*p13.z + 4*p14.z + p15.z)
+ 4 * (p16.z + 4*p17.z + 6*p18.z + 4*p19.z + p20.z)
+ 1 * (p21.z + 4*p22.z + 6*p23.z + 4*p24.z + p25.z)) / 256;
*/
float4 v = (1 * (p1 + 4*p2 + 6*p3 + 4*p4 + p5)
+ 4 * (p6 + 4*p7 + 6*p8 + 4*p9 + p10)
+ 6 * (p11 + 4*p12 + 6*p13 + 4*p14 + p15)
+ 4 * (p16 + 4*p17 + 6*p18 + 4*p19 + p20)
+ 1 * (p21 + 4*p22 + 6*p23 + 4*p24 + p25)) / 256;
// target[width * iy + ix] = make_color(v_x, v_y, v_z, 0);
target[width * iy + ix] = v;
}
}
// 1D texture, direct 1D-float
texture<float, 1, cudaReadModeElementType> ga_tex_image_f;
__global__ void gaussDown_f(float *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
int w = 2*width;
float p1 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -2) * w );
float p2 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -2) * w );
float p3 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -2) * w );
float p4 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -2) * w );
float p5 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -2) * w );
float p6 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -1) * w );
float p7 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -1) * w );
float p8 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -1) * w );
float p9 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -1) * w );
float p10 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -1) * w );
float p11 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty -0) * w );
float p12 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty -0) * w );
float p13 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty -0) * w );
float p14 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty -0) * w );
float p15 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty -0) * w );
float p16 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty +1) * w );
float p17 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty +1) * w );
float p18 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty +1) * w );
float p19 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty +1) * w );
float p20 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty +1) * w );
float p21 = tex1Dfetch(ga_tex_image_f, tx - 2 + (ty +2) * w );
float p22 = tex1Dfetch(ga_tex_image_f, tx - 1 + (ty +2) * w );
float p23 = tex1Dfetch(ga_tex_image_f, tx - 0 + (ty +2) * w );
float p24 = tex1Dfetch(ga_tex_image_f, tx + 1 + (ty +2) * w );
float p25 = tex1Dfetch(ga_tex_image_f, tx + 2 + (ty +2) * w );
float v_x = (1 * (p1 + 4*p2 + 6*p3 + 4*p4 + p5)
+ 4 * (p6 + 4*p7 + 6*p8 + 4*p9 + p10)
+ 6 * (p11 + 4*p12 + 6*p13 + 4*p14 + p15)
+ 4 * (p16 + 4*p17 + 6*p18 + 4*p19 + p20)
+ 1 * (p21 + 4*p22 + 6*p23 + 4*p24 + p25)) /256 ;
target[width * iy + ix] = v_x;
}
}
/*
* src image pixels: a b c dst image pixels: A B C D E
* F G H I J
* d e f K L M N O
* P Q R S T
* g h i U V W X Y
* M = 1 * (a + 6b + c)
* + 6 * (d + 6e + f)
* + 1 * (g + 6h + i)
*
* N = 1 * (4b + 4c)
* + 6 * (4e + 4f)
* + 1 * (4h + 4i)
*
* R = 4 * (d + 6e + f)
* + 4 * (g + 6h + i)
*
* S = 4 * (4e + 4f)
* + 4 * (4h + 4i)
*/
__global__ void gaussUp(float4 *target, int width, int height) {
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < width && iy < height) {
int tx = 2 * ix;
int ty = 2 * iy;
float4 p1 = tex1Dfetch(ga_tex_image, ix-1 + (iy-1) * width);
float4 p2 = tex1Dfetch(ga_tex_image, ix + (iy-1) * width);
float4 p3 = tex1Dfetch(ga_tex_image, ix+1 + (iy-1) * width);
float4 p4 = tex1Dfetch(ga_tex_image, ix-1 + iy * width);
float4 p5 = tex1Dfetch(ga_tex_image, ix + iy * width);
float4 p6 = tex1Dfetch(ga_tex_image, ix+1 + iy * width);
float4 p7 = tex1Dfetch(ga_tex_image, ix-1 + (iy+1) * width);
float4 p8 = tex1Dfetch(ga_tex_image, ix + (iy+1) * width);
float4 p9 = tex1Dfetch(ga_tex_image, ix+1 + (iy+1) * width);
if (iy == (height-1)) {
p7 = p5;
p8 = p5;
p9 = p5;
}
if (ix == (width-1)) {
p3 = p5;
p6 = p5;
p9 = p5;
}
float4 t1 = 1 * (p1 + 6*p2 + p3) +
6 * (p4 + 6*p5 + p6) +
1 * (p7 + 6*p8 + p9);
float4 t2 = 1 * (4*p2 + 4*p3) +
6 * (4*p5 + 4*p6) +
1 * (4*p8 + 4*p9);
float4 t3 = 4 * (p4 + 6*p5 + p6) +
4 * (p7 + 6*p8 + p9);
float4 t4 = 4 * (4*p5 + 4*p6) +
4 * (4*p8 + 4*p9);
float4 n1 = t1 / 64;
float4 n2 = t2 / 64;
float4 n3 = t3 / 64;
float4 n4 = t4 / 64;
/*
target[2*width * (ty) + tx] = make_color(n1.x, n1.y, n1.z, 0);
target[2*width * (ty) + (ix * 2 + 1)] = make_color(n2.x, n2.y, n2.z, 0);
target[2*width * (ty + 1) + tx] = make_color(n3.x, n3.y, n3.z, 0);
target[2*width * (ty + 1) + (ix * 2 + 1)] = make_color(n4.x, n4.y, n4.z, 0);
*/
target[2*width * (ty) + tx] = n1;
target[2*width * (ty) + (ix * 2 + 1)] = n2;
target[2*width * (ty + 1) + tx] = n3;
target[2*width * (ty + 1) + (ix * 2 + 1)] = n4;
}
}
|
dd25c36ef3251a69074aa55d164596eec21934fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include "device_launch_parameters.h"
#include <texture_fetch_functions.h>
#include <SDL.h>
#include <Windows.h>
#include <gl/GL.h>
using namespace std;
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const* const func, const char* const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable** world, hiprandState_t* local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.1f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0, 0.0, 0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void rand_init(hiprandState_t* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
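// One thread per pixel: average ns jittered camera samples, gamma-correct
// with a sqrt, and write the result into the framebuffer.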
__global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, hiprandState_t* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (hiprand_uniform(&local_rand_state))
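// Built by a single GPU thread: a large ground sphere, a 22x22 grid of small
// randomized spheres (diffuse, metal or glass), three big feature spheres,
// the hitable_list that owns them, and the camera.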
__global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int width, int height, hiprandState_t* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
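                // ~80% diffuse, ~15% metal, ~5% glass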
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
}
else if (choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
float dist_to_focus = 10.0; //(lookfrom - lookat).length();
float aperture = 0.01;
*d_camera = new camera(lookfrom,
lookat,
vec3(0, 1, 0),
20.0,
float(width) / float(height),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
delete ((sphere*)d_list[i])->mat_ptr;
delete d_list[i];
}
delete* d_world;
delete* d_camera;
}
void saveImage(int width, int height, vec3* fb, string output) {
ofstream img(output);
img << "P3\n" << width << " " << height << "\n255\n";
for (int j = height - 1; j >= 0; j--) {
for (int i = 0; i < width; i++) {
size_t pixel_index = j * width + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
img << ir << " " << ig << " " << ib << "\n";
}
}
}
int main(int argc, char* argv[]) {
int width = 1280;
int height = 720;
int ns = 100;
int tx = 16;
int ty = 16;
bool running = true, updateDisplay = true;
SDL_Window* window;
SDL_Renderer* renderer;
SDL_Texture* texture;
std::cerr << "Rendering a " << width << "x" << height << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = width * height;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3* fb;
checkCudaErrors(hipMallocManaged((void**)&fb, fb_size));
// allocate random state
hiprandState_t* d_rand_state;
checkCudaErrors(hipMalloc((void**)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
hiprandState_t* d_rand_state2;
checkCudaErrors(hipMalloc((void**)&d_rand_state2, 1 * sizeof(hiprandState_t)));
// we need that 2nd random state to be initialized for the world creation
rand_init << <1, 1 >> > (d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// make our world of hitables & the camera
hitable** d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(hipMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
hitable** d_world;
checkCudaErrors(hipMalloc((void**)&d_world, sizeof(hitable*)));
camera** d_camera;
checkCudaErrors(hipMalloc((void**)&d_camera, sizeof(camera*)));
create_world << <1, 1 >> > (d_list, d_world, d_camera, width, height, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Save image
//saveImage(width, height, fb, "output.ppm");
// Output FB as Image
if (SDL_Init(SDL_INIT_VIDEO) < 0) {
std::cout << "[Error] Failed to initialise SDL2";
return 1;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
window = SDL_CreateWindow("Ray Tracing", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, SDL_WINDOW_OPENGL);
if (window == NULL) {
std::cout << SDL_GetError();
return 1;
}
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
if (renderer == NULL) {
std::cout << SDL_GetError();
return 1;
}
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA32, SDL_TEXTUREACCESS_STREAMING, width, height);
if (texture == NULL) {
std::cout << SDL_GetError();
return 1;
}
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> > (width, height, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
render << <blocks, threads >> > (fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
int j = 0;
int i = 0;
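    // Display the already-rendered framebuffer by painting one pixel per loop
    // iteration (left to right, bottom image row first) while polling SDL events.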
while (running) {
// Poll for user input events
SDL_Event event;
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_QUIT: {
running = false;
}
break;
case SDL_WINDOWEVENT: {
if (event.window.event == SDL_WINDOWEVENT_RESIZED) {
width = (uint32_t)event.window.data1;
height = (uint32_t)event.window.data2;
}
}
break;
// ... Handle other events here. **IMPLEMENTATION OF MOUSE DETECTIONS**
}
}
size_t pixel_index = j * width + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
SDL_SetRenderDrawColor(renderer, ir, ig, ib, 255);
SDL_Rect rectangle;
rectangle.x = i;
rectangle.y = height - (j + 1);
rectangle.w = 1;
rectangle.h = 1;
SDL_RenderFillRect(renderer, &rectangle);
SDL_RenderPresent(renderer);
// Advance to the next pixel, wrapping at the end of each row and of the
// image so pixel_index never runs past the framebuffer.
if (i < width - 1) {
i++;
}
else {
i = 0;
j = (j + 1) % height;
}
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
free_world << <1, 1 >> > (d_list, d_world, d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(fb));
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
hipDeviceReset();
return 0;
} | dd25c36ef3251a69074aa55d164596eec21934fd.cu | #include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include "device_launch_parameters.h"
#include <texture_fetch_functions.h>
#include <SDL.h>
#include <Windows.h>
#include <gl/GL.h>
using namespace std;
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable** world, curandState* local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.1f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0, 0.0, 0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void rand_init(curandState* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, curandState* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, curandState* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
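// Gamma-2 correction: take the square root of each averaged channel.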
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (curand_uniform(&local_rand_state))
__global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int width, int height, curandState* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
}
else if (choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
float dist_to_focus = 10.0; //(lookfrom - lookat).length();
float aperture = 0.01;
*d_camera = new camera(lookfrom,
lookat,
vec3(0, 1, 0),
20.0,
float(width) / float(height),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
delete ((sphere*)d_list[i])->mat_ptr;
delete d_list[i];
}
delete* d_world;
delete* d_camera;
}
void saveImage(int width, int height, vec3* fb, string output) {
ofstream img(output);
img << "P3\n" << width << " " << height << "\n255\n";
for (int j = height - 1; j >= 0; j--) {
for (int i = 0; i < width; i++) {
size_t pixel_index = j * width + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
img << ir << " " << ig << " " << ib << "\n";
}
}
}
int main(int argc, char* argv[]) {
int width = 1280;
int height = 720;
int ns = 100;
int tx = 16;
int ty = 16;
bool running = true, updateDisplay = true;
SDL_Window* window;
SDL_Renderer* renderer;
SDL_Texture* texture;
std::cerr << "Rendering a " << width << "x" << height << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = width * height;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3* fb;
checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size));
// allocate random state
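// One RNG state per pixel so every rendering thread draws from its own independent sequence.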
curandState* d_rand_state;
checkCudaErrors(cudaMalloc((void**)&d_rand_state, num_pixels * sizeof(curandState)));
curandState* d_rand_state2;
checkCudaErrors(cudaMalloc((void**)&d_rand_state2, 1 * sizeof(curandState)));
// we need that 2nd random state to be initialized for the world creation
rand_init << <1, 1 >> > (d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// make our world of hitables & the camera
hitable** d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(cudaMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
hitable** d_world;
checkCudaErrors(cudaMalloc((void**)&d_world, sizeof(hitable*)));
camera** d_camera;
checkCudaErrors(cudaMalloc((void**)&d_camera, sizeof(camera*)));
create_world << <1, 1 >> > (d_list, d_world, d_camera, width, height, d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Save image
//saveImage(width, height, fb, "output.ppm");
// Output FB as Image
if (SDL_Init(SDL_INIT_VIDEO) < 0) {
std::cout << "[Error] Failed to initialise SDL2";
return 1;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
window = SDL_CreateWindow("Ray Tracing", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, SDL_WINDOW_OPENGL);
if (window == NULL) {
std::cout << SDL_GetError();
return 1;
}
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
if (renderer == NULL) {
std::cout << SDL_GetError();
return 1;
}
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA32, SDL_TEXTUREACCESS_STREAMING, width, height);
if (texture == NULL) {
std::cout << SDL_GetError();
return 1;
}
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
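// Round the grid up by one block in each dimension so partial 16x16 tiles at the
// right/bottom edges are still launched; render() bounds-checks (i, j).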
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> > (width, height, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render << <blocks, threads >> > (fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
int j = 0;
int i = 0;
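// Display loop: draw the finished framebuffer one pixel per frame, walking (i, j)
// across the image; y is flipped when drawing because SDL's origin is the top-left corner.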
while (running) {
// Poll for user input events
SDL_Event event;
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_QUIT: {
running = false;
}
break;
case SDL_WINDOWEVENT: {
if (event.window.event == SDL_WINDOWEVENT_RESIZED) {
width = (uint32_t)event.window.data1;
height = (uint32_t)event.window.data2;
}
}
break;
// ... Handle other events here. **IMPLEMENTATION OF MOUSE DETECTIONS**
}
}
size_t pixel_index = j * width + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
SDL_SetRenderDrawColor(renderer, ir, ig, ib, 255);
SDL_Rect rectangle;
rectangle.x = i;
rectangle.y = height - (j + 1);
rectangle.w = 1;
rectangle.h = 1;
SDL_RenderFillRect(renderer, &rectangle);
SDL_RenderPresent(renderer);
// Advance to the next pixel, wrapping at the end of each row and of the
// image so pixel_index never runs past the framebuffer.
if (i < width - 1) {
i++;
}
else {
i = 0;
j = (j + 1) % height;
}
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world << <1, 1 >> > (d_list, d_world, d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(fb));
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
cudaDeviceReset();
return 0;
} |
6080b222852d9eb867a873ea75b82eafe7ca8839.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCStorageCopy.cu"
#else
#include "THHTensorCopy.h"
void THCStorage_(rawCopy)(THCState *state, THCStorage *self, real *src)
{
THCudaCheck(hipMemcpyAsync(self->data, src, self->size * sizeof(real), hipMemcpyDeviceToDevice, THCState_getCurrentStream(state)));
}
// conversions are delegated to THCTensor implementation
#define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \
void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \
{ \
THArgCheck(self->size == src->size, 2, "size does not match"); \
THCTensor* selfTensor = THCTensor_(newWithStorage1d)(state, self, 0, self->size, 1); \
struct THCuda##TYPECUDA##Tensor* srcTensor = \
THCuda##TYPECUDA##Tensor_newWithStorage1d(state, src, 0, src->size, 1); \
THCTensor_(copyCuda##TYPEC)(state, selfTensor, srcTensor); \
THCuda##TYPECUDA##Tensor_free(state, srcTensor); \
THCTensor_(free)(state, selfTensor); \
}
#if defined(THC_REAL_IS_ZDOUBLE)
THC_CUDA_STORAGE_IMPLEMENT_COPY(ZDouble,ZDouble)
#elif defined(THC_REAL_IS_ZFLOAT)
THC_CUDA_STORAGE_IMPLEMENT_COPY(ZFloat,ZFloat)
#else
THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float
THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double)
#ifdef CUDA_HALF_TENSOR
THC_CUDA_STORAGE_IMPLEMENT_COPY(Half,Half)
#endif
#endif
#undef THC_CUDA_STORAGE_IMPLEMENT_COPY
void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src)
{
THCStorage_(TH_CONCAT_2(copyCuda, Real))(state, self, src);
}
void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src)
{
THCStorage_(copyCuda)(state, self, src);
}
#endif
| 6080b222852d9eb867a873ea75b82eafe7ca8839.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCStorageCopy.cu"
#else
#include "THCTensorCopy.h"
void THCStorage_(rawCopy)(THCState *state, THCStorage *self, real *src)
{
THCudaCheck(cudaMemcpyAsync(self->data, src, self->size * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state)));
}
// conversions are delegated to THCTensor implementation
#define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \
void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \
{ \
THArgCheck(self->size == src->size, 2, "size does not match"); \
THCTensor* selfTensor = THCTensor_(newWithStorage1d)(state, self, 0, self->size, 1); \
struct THCuda##TYPECUDA##Tensor* srcTensor = \
THCuda##TYPECUDA##Tensor_newWithStorage1d(state, src, 0, src->size, 1); \
THCTensor_(copyCuda##TYPEC)(state, selfTensor, srcTensor); \
THCuda##TYPECUDA##Tensor_free(state, srcTensor); \
THCTensor_(free)(state, selfTensor); \
}
#if defined(THC_REAL_IS_ZDOUBLE)
THC_CUDA_STORAGE_IMPLEMENT_COPY(ZDouble,ZDouble)
#elif defined(THC_REAL_IS_ZFLOAT)
THC_CUDA_STORAGE_IMPLEMENT_COPY(ZFloat,ZFloat)
#else
THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long)
THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float
THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double)
#ifdef CUDA_HALF_TENSOR
THC_CUDA_STORAGE_IMPLEMENT_COPY(Half,Half)
#endif
#endif
#undef THC_CUDA_STORAGE_IMPLEMENT_COPY
void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src)
{
THCStorage_(TH_CONCAT_2(copyCuda, Real))(state, self, src);
}
void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src)
{
THCStorage_(copyCuda)(state, self, src);
}
#endif
|
home-work5.hip | // !!! This is a file automatically generated by hipify!!!
// Include libraries
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iomanip>
#include <time.h>
#include <iostream>
#include "../utils/cuda_utils.cuh"
#include <cmath>
using namespace std;
#define BASE_TYPE float
#define M_PI 3.141592653
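// Gram-Schmidt-style orthogonalisation of the rows of A into B: calculateProj
// accumulates the numerators <a_row, b_k> and denominators <b_k, b_k> into the
// projection buffer, and calculateVector combines them to build each row of B.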
__global__ void calculateVector(int row, float* sourceAMatrix, float* targetBMatrix, float* buffetProjMatrix, int rows, int columns) {
int vectorNumber = threadIdx.x;
int vectorElement = threadIdx.y;
float* bVector = targetBMatrix + (row * columns);
float* aVector = sourceAMatrix + (row * columns);
if (vectorNumber == 0) {
atomicAdd(bVector + vectorElement, *(aVector + vectorElement));
}
else if (vectorNumber > row) {
// Do nothing; further elements are not computed
}
else {
float upper = *(buffetProjMatrix + vectorNumber);
float lower = *(buffetProjMatrix + columns + vectorNumber);
printf("Upper: %f, Lower: %f, %d, %d, %d\n", upper, lower, row, vectorNumber, vectorElement);
atomicAdd(bVector + vectorElement, (*(aVector + vectorElement) * -upper / lower));
}
}
__global__ void calculateProj(int row, float* sourceAMatrix, float* targetBMatrix, float* buffetProjMatrix, int rows, int columns) {
int bVectorIndex = threadIdx.x - 1;
int vectorElement = threadIdx.y;
// If 0, the upper (numerator) term <a,b>
// If 1, the lower (denominator) term <b,b>
int lowerExpr = blockIdx.x;
float* target = buffetProjMatrix + (columns * lowerExpr) + (bVectorIndex + 1);
float* result = 0;
// If this is the zeroth element proj(b-1, a_row)
if (bVectorIndex == -1)
{
//atomicAdd(target, *(sourceAMatrix + (row * columns) + vectorElement));
}
else if (bVectorIndex >= row) {
// Do nothing; further elements are not computed
}
else {
float* vectorA;
if (lowerExpr == 1) {
vectorA = targetBMatrix + (bVectorIndex * columns);
}
else {
vectorA = sourceAMatrix + (row * columns);
}
float* vectorB = targetBMatrix + (bVectorIndex * columns);
atomicAdd(target, (*(vectorB + vectorElement) * *(vectorA + vectorElement)));
}
return;
}
void printMatrix(float* matrix, int rows, int columns) {
for (int row = 0; row < rows; row++) {
for (int column = 0; column < columns; column++) {
cout << *(matrix + (row * columns) + column) << ", ";
}
cout << endl;
}
}
void fillMatrix(float* matrix, int rows, int columns, bool allZero, bool upperTreangle) {
for (int row = 0; row < rows; row++) {
for (int column = 0; column < columns; column++) {
if (upperTreangle && row <= column) {
*(matrix + (row * columns) + column) = 1.0;
}
else if(upperTreangle && row < column || allZero) {
*(matrix + (row * columns) + column) = 0.0;
}
}
}
}
int homeWork5() {
size_t rows = 3;
size_t columns = 3;
dim3 gridSizeProj(2);
dim3 gridSize(1);
dim3 blockSize(rows, columns);
int elementCount = rows * columns;
float* sourceAMatrix = new float[elementCount];
fillMatrix(sourceAMatrix, rows, columns, true, false);
printMatrix(sourceAMatrix, rows, columns);
cout << "--------------------------" << endl;
fillMatrix(sourceAMatrix, rows, columns, false, true);
printMatrix(sourceAMatrix, rows, columns);
cout << "--------------------------" << endl;
float* deviceSourceAMatrix;
hipMalloc((void**)&deviceSourceAMatrix, elementCount * sizeof(float));
float* targetBMatrix = new float[elementCount];
fillMatrix(targetBMatrix, rows, columns, true, false);
float* deviceTargetBMatrix;
hipMalloc((void**)&deviceTargetBMatrix, elementCount * sizeof(float));
float* buffetProjMatrix = new float[columns * 2];
fillMatrix(buffetProjMatrix, 2, columns, true, false);
float* deviceBuffetProjMatrix;
hipMalloc((void**)&deviceBuffetProjMatrix, columns * 2 * sizeof(float));
float* buffetProjMatrixToPrint = new float[columns * 2];
hipMemcpy(deviceSourceAMatrix, sourceAMatrix, elementCount * sizeof(float), hipMemcpyHostToDevice);
for (int i = 0; i < rows; i++) {
hipMemcpy(deviceBuffetProjMatrix, buffetProjMatrix, columns * 2 * sizeof(float), hipMemcpyHostToDevice);
calculateProj << <gridSizeProj, blockSize >> > (i, deviceSourceAMatrix, deviceTargetBMatrix, deviceBuffetProjMatrix, rows, columns);
hipDeviceSynchronize();
calculateVector << <gridSize, blockSize >> > (i, deviceSourceAMatrix, deviceTargetBMatrix, deviceBuffetProjMatrix, rows, columns);
hipMemcpy(buffetProjMatrixToPrint, deviceBuffetProjMatrix, 2 * columns * sizeof(float), hipMemcpyDeviceToHost);
printMatrix(buffetProjMatrixToPrint, 2, columns);
cout << "--------------------------" << endl;
hipMemcpy(targetBMatrix, deviceTargetBMatrix, elementCount * sizeof(float), hipMemcpyDeviceToHost);
printMatrix(targetBMatrix, rows, columns);
cout << "--------------------------" << endl;
}
hipMemcpy(targetBMatrix, deviceTargetBMatrix, elementCount * sizeof(float), hipMemcpyDeviceToHost);
printMatrix(targetBMatrix, rows, columns);
hipFree(deviceSourceAMatrix);
hipFree(deviceTargetBMatrix);
hipFree(deviceBuffetProjMatrix);
delete[] sourceAMatrix;
delete[] targetBMatrix;
delete[] buffetProjMatrix;
hipError_t err = hipGetLastError();
if (err != hipSuccess) printf("%s ", hipGetErrorString(err));
return 0;
} | home-work5.cu | // Include libraries
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iomanip>
#include <time.h>
#include <iostream>
#include "../utils/cuda_utils.cuh"
#include <cmath>
using namespace std;
#define BASE_TYPE float
#define M_PI 3.141592653
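// Gram-Schmidt-style orthogonalisation of the rows of A into B: calculateProj
// accumulates the numerators <a_row, b_k> and denominators <b_k, b_k> into the
// projection buffer, and calculateVector combines them to build each row of B.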
__global__ void calculateVector(int row, float* sourceAMatrix, float* targetBMatrix, float* buffetProjMatrix, int rows, int columns) {
int vectorNumber = threadIdx.x;
int vectorElement = threadIdx.y;
float* bVector = targetBMatrix + (row * columns);
float* aVector = sourceAMatrix + (row * columns);
if (vectorNumber == 0) {
atomicAdd(bVector + vectorElement, *(aVector + vectorElement));
}
else if (vectorNumber > row) {
// Do nothing; further elements are not computed
}
else {
float upper = *(buffetProjMatrix + vectorNumber);
float lower = *(buffetProjMatrix + columns + vectorNumber);
printf("Upper: %f, Lower: %f, %d, %d, %d\n", upper, lower, row, vectorNumber, vectorElement);
atomicAdd(bVector + vectorElement, (*(aVector + vectorElement) * -upper / lower));
}
}
__global__ void calculateProj(int row, float* sourceAMatrix, float* targetBMatrix, float* buffetProjMatrix, int rows, int columns) {
int bVectorIndex = threadIdx.x - 1;
int vectorElement = threadIdx.y;
// If 0, the upper (numerator) term <a,b>
// If 1, the lower (denominator) term <b,b>
int lowerExpr = blockIdx.x;
float* target = buffetProjMatrix + (columns * lowerExpr) + (bVectorIndex + 1);
float* result = 0;
// If this is the zeroth element proj(b-1, a_row)
if (bVectorIndex == -1)
{
//atomicAdd(target, *(sourceAMatrix + (row * columns) + vectorElement));
}
else if (bVectorIndex >= row) {
// Do nothing; further elements are not computed
}
else {
float* vectorA;
if (lowerExpr == 1) {
vectorA = targetBMatrix + (bVectorIndex * columns);
}
else {
vectorA = sourceAMatrix + (row * columns);
}
float* vectorB = targetBMatrix + (bVectorIndex * columns);
atomicAdd(target, (*(vectorB + vectorElement) * *(vectorA + vectorElement)));
}
return;
}
void printMatrix(float* matrix, int rows, int columns) {
for (int row = 0; row < rows; row++) {
for (int column = 0; column < columns; column++) {
cout << *(matrix + (row * columns) + column) << ", ";
}
cout << endl;
}
}
void fillMatrix(float* matrix, int rows, int columns, bool allZero, bool upperTreangle) {
for (int row = 0; row < rows; row++) {
for (int column = 0; column < columns; column++) {
if (upperTreangle && row <= column) {
*(matrix + (row * columns) + column) = 1.0;
}
else if(upperTreangle && row < column || allZero) {
*(matrix + (row * columns) + column) = 0.0;
}
}
}
}
int homeWork5() {
size_t rows = 3;
size_t columns = 3;
dim3 gridSizeProj(2);
dim3 gridSize(1);
dim3 blockSize(rows, columns);
int elementCount = rows * columns;
float* sourceAMatrix = new float[elementCount];
fillMatrix(sourceAMatrix, rows, columns, true, false);
printMatrix(sourceAMatrix, rows, columns);
cout << "--------------------------" << endl;
fillMatrix(sourceAMatrix, rows, columns, false, true);
printMatrix(sourceAMatrix, rows, columns);
cout << "--------------------------" << endl;
float* deviceSourceAMatrix;
cudaMalloc((void**)&deviceSourceAMatrix, elementCount * sizeof(float));
float* targetBMatrix = new float[elementCount];
fillMatrix(targetBMatrix, rows, columns, true, false);
float* deviceTargetBMatrix;
cudaMalloc((void**)&deviceTargetBMatrix, elementCount * sizeof(float));
float* buffetProjMatrix = new float[columns * 2];
fillMatrix(buffetProjMatrix, 2, columns, true, false);
float* deviceBuffetProjMatrix;
cudaMalloc((void**)&deviceBuffetProjMatrix, columns * 2 * sizeof(float));
float* buffetProjMatrixToPrint = new float[columns * 2];
cudaMemcpy(deviceSourceAMatrix, sourceAMatrix, elementCount * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < rows; i++) {
cudaMemcpy(deviceBuffetProjMatrix, buffetProjMatrix, columns * 2 * sizeof(float), cudaMemcpyHostToDevice);
calculateProj << <gridSizeProj, blockSize >> > (i, deviceSourceAMatrix, deviceTargetBMatrix, deviceBuffetProjMatrix, rows, columns);
cudaDeviceSynchronize();
calculateVector << <gridSize, blockSize >> > (i, deviceSourceAMatrix, deviceTargetBMatrix, deviceBuffetProjMatrix, rows, columns);
cudaMemcpy(buffetProjMatrixToPrint, deviceBuffetProjMatrix, 2 * columns * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(buffetProjMatrixToPrint, 2, columns);
cout << "--------------------------" << endl;
cudaMemcpy(targetBMatrix, deviceTargetBMatrix, elementCount * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(targetBMatrix, rows, columns);
cout << "--------------------------" << endl;
}
cudaMemcpy(targetBMatrix, deviceTargetBMatrix, elementCount * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(targetBMatrix, rows, columns);
cudaFree(deviceSourceAMatrix);
cudaFree(deviceTargetBMatrix);
cudaFree(deviceBuffetProjMatrix);
delete[] sourceAMatrix;
delete[] targetBMatrix;
delete[] buffetProjMatrix;
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) printf("%s ", cudaGetErrorString(err));
return 0;
} |
elementwise_permute.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <hip/hip_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != hipSuccess ) \
{ printf("Error: %s\n", hipGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
hipEventCreate(&start_);
hipEventCreate(&stop_);
hipEventRecord(start_, 0);
}
~GPUTimer()
{
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
void start()
{
hipEventRecord(start_, 0);
}
float seconds()
{
hipEventRecord(stop_, 0);
hipEventSynchronize(stop_);
float time;
hipEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
hipEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeC;
typedef float floatTypeCompute;
hipDataType typeA = HIP_R_32F;
hipDataType typeC = HIP_R_32F;
hipDataType typeCompute = HIP_R_32F;
/**********************
* This example illustrates the use case where an input tensor A (in host memory) is
* permuted from an NCHW data layout to NHWC while moving the data from host to device
* memory C:
*
* C_{c,w,h,n} = A_{w,h,c,n}
**********************/
std::vector<int> modeC{'c','w','h','n'};
std::vector<int> modeA{'w','h','c','n'};
int nmodeA = modeA.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['h'] = 128;
extent['w'] = 32;
extent['c'] = 128;
extent['n'] = 128;
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
void *A_d, *C_d;
HANDLE_CUDA_ERROR(hipMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(hipMalloc((void**) &C_d, sizeC));
floatTypeA *A, *C;
HANDLE_CUDA_ERROR(hipHostMalloc((void**) &A, sizeof(floatTypeA) * elementsA));
HANDLE_CUDA_ERROR(hipHostMalloc((void**) &C, sizeof(floatTypeC) * elementsC));
/*******************
* Initialize data
*******************/
for (size_t i = 0; i < elementsA; i++)
{
A[i] = (((float) rand())/RAND_MAX)*100;
}
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(A_d, sizeA, A, sizeA, sizeA, 1, hipMemcpyDefault, 0));
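// Note: a 2D async copy with height 1 and a row of sizeA bytes is simply an
// asynchronous host-to-device copy of the whole tensor.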
/*************************
* CUTENSOR
*************************/
cutensorStatus_t err;
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
double minTimeCUTENSOR = 1e100;
for (int i = 0; i < 3; i++)
{
GPUTimer timer;
timer.start();
const floatTypeCompute one = 1.0f;
err = cutensorPermutation(&handle,
&one, A_d, &descA, modeA.data(),
C_d, &descC, modeC.data(),
typeCompute, 0 /* stream */);
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
printf("ERROR: %s\n", cutensorGetErrorString(err));
minTimeCUTENSOR = (minTimeCUTENSOR < time) ? minTimeCUTENSOR : time;
}
/*************************/
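// Effective bandwidth: the permutation reads and writes sizeC bytes once each.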
double transferedBytes = 2.0 * sizeC;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
if (A) hipHostFree(A);
if (C) hipHostFree(C);
if (A_d) hipFree(A_d);
if (C_d) hipFree(C_d);
return 0;
}
| elementwise_permute.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <cuda_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != cudaSuccess ) \
{ printf("Error: %s\n", cudaGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
cudaEventCreate(&start_);
cudaEventCreate(&stop_);
cudaEventRecord(start_, 0);
}
~GPUTimer()
{
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
void start()
{
cudaEventRecord(start_, 0);
}
float seconds()
{
cudaEventRecord(stop_, 0);
cudaEventSynchronize(stop_);
float time;
cudaEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
cudaEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeC;
typedef float floatTypeCompute;
cudaDataType_t typeA = CUDA_R_32F;
cudaDataType_t typeC = CUDA_R_32F;
cudaDataType_t typeCompute = CUDA_R_32F;
/**********************
* This example illustrates the use case where an input tensor A (in host memory) is
* permuted from an NCHW data layout to NHWC while moving the data from host to device
* memory C:
*
* C_{c,w,h,n} = A_{w,h,c,n}
**********************/
std::vector<int> modeC{'c','w','h','n'};
std::vector<int> modeA{'w','h','c','n'};
int nmodeA = modeA.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['h'] = 128;
extent['w'] = 32;
extent['c'] = 128;
extent['n'] = 128;
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
void *A_d, *C_d;
HANDLE_CUDA_ERROR(cudaMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(cudaMalloc((void**) &C_d, sizeC));
floatTypeA *A, *C;
HANDLE_CUDA_ERROR(cudaMallocHost((void**) &A, sizeof(floatTypeA) * elementsA));
HANDLE_CUDA_ERROR(cudaMallocHost((void**) &C, sizeof(floatTypeC) * elementsC));
/*******************
* Initialize data
*******************/
for (size_t i = 0; i < elementsA; i++)
{
A[i] = (((float) rand())/RAND_MAX)*100;
}
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(A_d, sizeA, A, sizeA, sizeA, 1, cudaMemcpyDefault, 0));
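// Note: a 2D async copy with height 1 and a row of sizeA bytes is simply an
// asynchronous host-to-device copy of the whole tensor.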
/*************************
* CUTENSOR
*************************/
cutensorStatus_t err;
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
double minTimeCUTENSOR = 1e100;
for (int i = 0; i < 3; i++)
{
GPUTimer timer;
timer.start();
const floatTypeCompute one = 1.0f;
err = cutensorPermutation(&handle,
&one, A_d, &descA, modeA.data(),
C_d, &descC, modeC.data(),
typeCompute, 0 /* stream */);
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
printf("ERROR: %s\n", cutensorGetErrorString(err));
minTimeCUTENSOR = (minTimeCUTENSOR < time) ? minTimeCUTENSOR : time;
}
/*************************/
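// Effective bandwidth: the permutation reads and writes sizeC bytes once each.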
double transferedBytes = 2.0 * sizeC;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
if (A) cudaFreeHost(A);
if (C) cudaFreeHost(C);
if (A_d) cudaFree(A_d);
if (C_d) cudaFree(C_d);
return 0;
}
|
bef9467483f5f12b4314bfe11810d96963fdf534.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <libsgm.h>
#include "internal.h"
#include "utility.hpp"
namespace {
__global__ void correct_disparity_range_kernel(uint16_t* d_disp, int width, int height, int pitch, int min_disp_scaled, int invalid_disp_scaled) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
uint16_t d = d_disp[y * pitch + x];
if (d == sgm::INVALID_DISP) {
d = invalid_disp_scaled;
} else {
d += min_disp_scaled;
}
d_disp[y * pitch + x] = d;
}
}
namespace sgm {
namespace details {
void correct_disparity_range(uint16_t* d_disp, int width, int height, int pitch, bool subpixel, int min_disp) {
if (!subpixel && min_disp == 0) {
return;
}
static constexpr int SIZE = 16;
const dim3 blocks((width + SIZE - 1) / SIZE, (height + SIZE - 1) / SIZE);
const dim3 threads(SIZE, SIZE);
const int scale = subpixel ? StereoSGM::SUBPIXEL_SCALE : 1;
const int min_disp_scaled = min_disp * scale;
const int invalid_disp_scaled = (min_disp - 1) * scale;
hipLaunchKernelGGL(( correct_disparity_range_kernel), dim3(blocks), dim3(threads), 0, 0, d_disp, width, height, pitch, min_disp_scaled, invalid_disp_scaled);
}
}
}
| bef9467483f5f12b4314bfe11810d96963fdf534.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <libsgm.h>
#include "internal.h"
#include "utility.hpp"
namespace {
__global__ void correct_disparity_range_kernel(uint16_t* d_disp, int width, int height, int pitch, int min_disp_scaled, int invalid_disp_scaled) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
uint16_t d = d_disp[y * pitch + x];
if (d == sgm::INVALID_DISP) {
d = invalid_disp_scaled;
} else {
d += min_disp_scaled;
}
d_disp[y * pitch + x] = d;
}
}
namespace sgm {
namespace details {
void correct_disparity_range(uint16_t* d_disp, int width, int height, int pitch, bool subpixel, int min_disp) {
if (!subpixel && min_disp == 0) {
return;
}
static constexpr int SIZE = 16;
const dim3 blocks((width + SIZE - 1) / SIZE, (height + SIZE - 1) / SIZE);
const dim3 threads(SIZE, SIZE);
const int scale = subpixel ? StereoSGM::SUBPIXEL_SCALE : 1;
const int min_disp_scaled = min_disp * scale;
const int invalid_disp_scaled = (min_disp - 1) * scale;
correct_disparity_range_kernel<<<blocks, threads>>>(d_disp, width, height, pitch, min_disp_scaled, invalid_disp_scaled);
}
}
}
|
a1c9b9d8b9a9c8a72133185541ccbf2a92313928.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <ks_force_quda.h>
namespace quda {
template<typename Oprod, typename Gauge, typename Mom>
struct KSForceArg {
int threads;
int X[4]; // grid dimensions
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
int border[4];
#endif
#endif
Oprod oprod;
Gauge gauge;
Mom mom;
KSForceArg(Oprod& oprod, Gauge &gauge, Mom& mom, int dim[4])
: oprod(oprod), gauge(gauge), mom(mom){
threads = 1;
for(int dir=0; dir<4; ++dir) threads *= dim[dir];
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir];
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir) border[dir] = 2;
#endif
#endif
}
};
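// Index helpers: linkIndex folds a site x plus an offset dx into the even/odd
// lattice index used by the field accessors; getCoords recovers the full
// coordinates from a checkerboard index and parity.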
__device__ __host__ inline int linkIndex(int x[], int dx[], const int X[4]) {
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
int idx = (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0]) >> 1;
return idx;
}
__device__ __host__ inline void getCoords(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
__host__ __device__ void completeKSForceCore(KSForceArg<Oprod,Gauge,Mom>& arg, int idx){
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
int x[4];
getCoords(x, idx, X, parity);
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
#endif
#endif
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> O;
Matrix<Cmplx,3> G;
Matrix<Cmplx,3> M;
int dx[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir){
arg.gauge.load((Float*)(G.data), linkIndex(x,dx,X), dir, parity);
arg.oprod.load((Float*)(O.data), linkIndex(x,dx,X), dir, parity);
if(parity==0){
M = G*O;
}else{
M = -G*O;
}
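// Store the traceless anti-Hermitian part of M in the 10-real packed momentum
// format: three off-diagonal complex components plus the three diagonal
// imaginary parts with the trace removed (the last element is padding).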
Float sub = getTrace(M).y/(static_cast<Float>(3));
Float temp[10];
temp[0] = (M.data[1].x - M.data[3].x)*0.5;
temp[1] = (M.data[1].y + M.data[3].y)*0.5;
temp[2] = (M.data[2].x - M.data[6].x)*0.5;
temp[3] = (M.data[2].y + M.data[6].y)*0.5;
temp[4] = (M.data[5].x - M.data[7].x)*0.5;
temp[5] = (M.data[5].y + M.data[7].y)*0.5;
temp[6] = (M.data[0].y-sub);
temp[7] = (M.data[4].y-sub);
temp[8] = (M.data[8].y-sub);
temp[9] = 0.0;
arg.mom.save(temp, idx, dir, parity);
}
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
__global__ void completeKSForceKernel(KSForceArg<Oprod,Gauge,Mom> arg)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx);
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
void completeKSForceCPU(KSForceArg<Oprod,Gauge,Mom>& arg)
{
for(int idx=0; idx<arg.threads; idx++){
completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx);
}
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
class KSForceComplete : Tunable {
KSForceArg<Oprod, Gauge, Mom> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
KSForceComplete(KSForceArg<Oprod,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.mom.stride);
}
virtual ~KSForceComplete() {}
void apply(const hipStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
#if (__COMPUTE_CAPABILITY__ >= 200)
// Fix this
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1);
hipLaunchKernelGGL(( completeKSForceKernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, arg);
#else
errorQuda("completeKSForce not supported on pre-Fermi architecture");
#endif
}else{
completeKSForceCPU<Float>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't print the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 792*arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3]; }
long long bytes() const { return 0; } // Fix this
};
template<typename Float, typename Oprod, typename Gauge, typename Mom>
void completeKSForce(Oprod oprod, Gauge gauge, Mom mom, int dim[4], const GaugeField &meta, QudaFieldLocation location, long long *flops)
{
KSForceArg<Oprod,Gauge,Mom> arg(oprod, gauge, mom, dim);
KSForceComplete<Float,Oprod,Gauge,Mom> completeForce(arg,meta,location);
completeForce.apply(0);
if(flops) *flops = completeForce.flops();
hipDeviceSynchronize();
}
template<typename Float>
void completeKSForce(GaugeField& mom, const GaugeField& oprod, const GaugeField& gauge, QudaFieldLocation location, long long *flops)
{
if(location != QUDA_CUDA_FIELD_LOCATION){
errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported");
}else{
if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (mom.Reconstruct() != QUDA_RECONSTRUCT_10)){
errorQuda("Reconstruct type not supported");
}else{
completeKSForce<Float>(FloatNOrder<Float, 18, 2, 18>(oprod),
FloatNOrder<Float, 18, 2, 18>(gauge),
FloatNOrder<Float, 10, 2, 10>(mom),
const_cast<int*>(mom.X()),
gauge, location, flops);
}
}
return;
}
void completeKSForce(GaugeField &mom, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location, long long *flops)
{
if(mom.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported");
}
if(mom.Precision() == QUDA_SINGLE_PRECISION){
completeKSForce<float>(mom, oprod, gauge, location, flops);
}else if(mom.Precision() == QUDA_DOUBLE_PRECISION){
completeKSForce<double>(mom, oprod, gauge, location, flops);
}else{
errorQuda("Precision %d not supported", mom.Precision());
}
return;
}
template<typename Result, typename Oprod, typename Gauge>
struct KSLongLinkArg {
int threads;
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
double coeff;
Result res;
Oprod oprod;
Gauge gauge;
KSLongLinkArg(Result& res, Oprod& oprod, Gauge &gauge, int dim[4])
: coeff(1.0), res(res), oprod(oprod), gauge(gauge){
threads = 1;
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir) threads *= (dim[dir]-2);
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]-2;
for(int dir=0; dir<4; ++dir) border[dir] = 2;
#else
for(int dir=0; dir<4; ++dir) threads *= dim[dir];
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir];
#endif
}
};
template<typename Float, typename Result, typename Oprod, typename Gauge>
__host__ __device__ void computeKSLongLinkForceCore(KSLongLinkArg<Result,Oprod,Gauge>& arg, int idx){
/*
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
int x[4];
getCoords(x, idx, X, parity);
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
#endif
#endif
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> O;
Matrix<Cmplx,3> G;
Matrix<Cmplx,3> M;
int dx[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir){
arg.gauge.load((Float*)(G.data), linkIndex(x,dx,X), dir, parity);
arg.oprod.load((Float*)(O.data), linkIndex(x,dx,X), dir, parity);
if(parity==0){
M = G*O;
}else{
M = -G*O;
}
Float sub = getTrace(M).y/(static_cast<Float>(3));
Float temp[10];
temp[0] = (M.data[1].x - M.data[3].x)*0.5;
temp[1] = (M.data[1].y + M.data[3].y)*0.5;
temp[2] = (M.data[2].x - M.data[6].x)*0.5;
temp[3] = (M.data[2].y + M.data[6].y)*0.5;
temp[4] = (M.data[5].x - M.data[7].x)*0.5;
temp[5] = (M.data[5].y + M.data[7].y)*0.5;
temp[6] = (M.data[0].y-sub);
temp[7] = (M.data[4].y-sub);
temp[8] = (M.data[8].y-sub);
temp[9] = 0.0;
arg.mom.save(temp, idx, dir, parity);
}
*/
}
template<typename Float, typename Result, typename Oprod, typename Gauge>
__global__ void computeKSLongLinkForceKernel(KSLongLinkArg<Result,Oprod,Gauge> arg)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx);
}
template<typename Float, typename Result, typename Oprod, typename Gauge>
void computeKSLongLinkForceCPU(KSLongLinkArg<Result,Oprod,Gauge>& arg)
{
for(int idx=0; idx<arg.threads; idx++){
computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx);
}
}
// should be tunable
template<typename Float, typename Result, typename Oprod, typename Gauge>
class KSLongLinkForce : Tunable {
KSLongLinkArg<Result,Oprod,Gauge> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
KSLongLinkForce(KSLongLinkArg<Result,Oprod,Gauge> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.res.stride);
}
virtual ~KSLongLinkForce() {}
void apply(const hipStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
#if (__COMPUTE_CAPABILITY__ >= 200)
// Fix this
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1);
hipLaunchKernelGGL(( computeKSLongLinkForceKernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, arg);
#else
errorQuda("computeKSLongLinkForce not supported on pre-Fermi architecture");
#endif
}else{
computeKSLongLinkForceCPU<Float>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't print the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return 0; } // Fix this
long long bytes() const { return 0; } // Fix this
};
template<typename Float, typename Result, typename Oprod, typename Gauge>
void computeKSLongLinkForce(Result res, Oprod oprod, Gauge gauge, int dim[4], const GaugeField &meta, QudaFieldLocation location)
{
KSLongLinkArg<Result,Oprod,Gauge> arg(res, oprod, gauge, dim);
KSLongLinkForce<Float,Result,Oprod,Gauge> computeLongLink(arg,meta,location);
computeLongLink.apply(0);
hipDeviceSynchronize();
}
template<typename Float>
void computeKSLongLinkForce(GaugeField& result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location)
{
if(location != QUDA_CUDA_FIELD_LOCATION){
errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported");
}else{
if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) ||
(result.Reconstruct() != QUDA_RECONSTRUCT_10)){
errorQuda("Reconstruct type not supported");
}else{
computeKSLongLinkForce<Float>(FloatNOrder<Float, 18, 2, 18>(result),
FloatNOrder<Float, 18, 2, 18>(oprod),
FloatNOrder<Float, 18, 2, 18>(gauge),
const_cast<int*>(result.X()),
gauge, location);
}
}
return;
}
void computeKSLongLinkForce(GaugeField &result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location)
{
if(result.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported");
}
if(result.Precision() == QUDA_SINGLE_PRECISION){
computeKSLongLinkForce<float>(result, oprod, gauge, location);
}else if(result.Precision() == QUDA_DOUBLE_PRECISION){
computeKSLongLinkForce<double>(result, oprod, gauge, location);
}
errorQuda("Precision %d not supported", result.Precision());
return;
}
} // namespace quda
| a1c9b9d8b9a9c8a72133185541ccbf2a92313928.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <ks_force_quda.h>
namespace quda {
template<typename Oprod, typename Gauge, typename Mom>
struct KSForceArg {
int threads;
int X[4]; // grid dimensions
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
int border[4];
#endif
#endif
Oprod oprod;
Gauge gauge;
Mom mom;
KSForceArg(Oprod& oprod, Gauge &gauge, Mom& mom, int dim[4])
: oprod(oprod), gauge(gauge), mom(mom){
threads = 1;
for(int dir=0; dir<4; ++dir) threads *= dim[dir];
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir];
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir) border[dir] = 2;
#endif
#endif
}
};
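// Index helpers: linkIndex folds a site x plus an offset dx into the even/odd
// lattice index used by the field accessors; getCoords recovers the full
// coordinates from a checkerboard index and parity.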
__device__ __host__ inline int linkIndex(int x[], int dx[], const int X[4]) {
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
int idx = (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0]) >> 1;
return idx;
}
__device__ __host__ inline void getCoords(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
__host__ __device__ void completeKSForceCore(KSForceArg<Oprod,Gauge,Mom>& arg, int idx){
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
int x[4];
getCoords(x, idx, X, parity);
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
#endif
#endif
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> O;
Matrix<Cmplx,3> G;
Matrix<Cmplx,3> M;
int dx[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir){
arg.gauge.load((Float*)(G.data), linkIndex(x,dx,X), dir, parity);
arg.oprod.load((Float*)(O.data), linkIndex(x,dx,X), dir, parity);
if(parity==0){
M = G*O;
}else{
M = -G*O;
}
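// Store the traceless anti-Hermitian part of M in the 10-real packed momentum
// format: three off-diagonal complex components plus the three diagonal
// imaginary parts with the trace removed (the last element is padding).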
Float sub = getTrace(M).y/(static_cast<Float>(3));
Float temp[10];
temp[0] = (M.data[1].x - M.data[3].x)*0.5;
temp[1] = (M.data[1].y + M.data[3].y)*0.5;
temp[2] = (M.data[2].x - M.data[6].x)*0.5;
temp[3] = (M.data[2].y + M.data[6].y)*0.5;
temp[4] = (M.data[5].x - M.data[7].x)*0.5;
temp[5] = (M.data[5].y + M.data[7].y)*0.5;
temp[6] = (M.data[0].y-sub);
temp[7] = (M.data[4].y-sub);
temp[8] = (M.data[8].y-sub);
temp[9] = 0.0;
arg.mom.save(temp, idx, dir, parity);
}
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
__global__ void completeKSForceKernel(KSForceArg<Oprod,Gauge,Mom> arg)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx);
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
void completeKSForceCPU(KSForceArg<Oprod,Gauge,Mom>& arg)
{
for(int idx=0; idx<arg.threads; idx++){
completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx);
}
}
template<typename Float, typename Oprod, typename Gauge, typename Mom>
class KSForceComplete : Tunable {
KSForceArg<Oprod, Gauge, Mom> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
KSForceComplete(KSForceArg<Oprod,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.mom.stride);
}
virtual ~KSForceComplete() {}
void apply(const cudaStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
#if (__COMPUTE_CAPABILITY__ >= 200)
// Fix this
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1);
completeKSForceKernel<Float><<<gridDim,blockDim>>>(arg);
#else
errorQuda("completeKSForce not supported on pre-Fermi architecture");
#endif
}else{
completeKSForceCPU<Float>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't print the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 792*arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3]; }
long long bytes() const { return 0; } // Fix this
};
template<typename Float, typename Oprod, typename Gauge, typename Mom>
void completeKSForce(Oprod oprod, Gauge gauge, Mom mom, int dim[4], const GaugeField &meta, QudaFieldLocation location, long long *flops)
{
KSForceArg<Oprod,Gauge,Mom> arg(oprod, gauge, mom, dim);
KSForceComplete<Float,Oprod,Gauge,Mom> completeForce(arg,meta,location);
completeForce.apply(0);
if(flops) *flops = completeForce.flops();
cudaDeviceSynchronize();
}
template<typename Float>
void completeKSForce(GaugeField& mom, const GaugeField& oprod, const GaugeField& gauge, QudaFieldLocation location, long long *flops)
{
if(location != QUDA_CUDA_FIELD_LOCATION){
errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported");
}else{
if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (mom.Reconstruct() != QUDA_RECONSTRUCT_10)){
errorQuda("Reconstruct type not supported");
}else{
completeKSForce<Float>(FloatNOrder<Float, 18, 2, 18>(oprod),
FloatNOrder<Float, 18, 2, 18>(gauge),
FloatNOrder<Float, 10, 2, 10>(mom),
const_cast<int*>(mom.X()),
gauge, location, flops);
}
}
return;
}
void completeKSForce(GaugeField &mom, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location, long long *flops)
{
if(mom.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported");
}
if(mom.Precision() == QUDA_SINGLE_PRECISION){
completeKSForce<float>(mom, oprod, gauge, location, flops);
}else if(mom.Precision() == QUDA_DOUBLE_PRECISION){
completeKSForce<double>(mom, oprod, gauge, location, flops);
}else{
errorQuda("Precision %d not supported", mom.Precision());
}
return;
}
template<typename Result, typename Oprod, typename Gauge>
struct KSLongLinkArg {
int threads;
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
double coeff;
Result res;
Oprod oprod;
Gauge gauge;
KSLongLinkArg(Result& res, Oprod& oprod, Gauge &gauge, int dim[4])
: coeff(1.0), res(res), oprod(oprod), gauge(gauge){
threads = 1;
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir) threads *= (dim[dir]-2);
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]-2;
for(int dir=0; dir<4; ++dir) border[dir] = 2;
#else
for(int dir=0; dir<4; ++dir) threads *= dim[dir];
for(int dir=0; dir<4; ++dir) X[dir] = dim[dir];
#endif
}
};
template<typename Float, typename Result, typename Oprod, typename Gauge>
__host__ __device__ void computeKSLongLinkForceCore(KSLongLinkArg<Result,Oprod,Gauge>& arg, int idx){
/*
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
int x[4];
getCoords(x, idx, X, parity);
#ifndef BUILD_TIFR_INTERFACE
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
#endif
#endif
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> O;
Matrix<Cmplx,3> G;
Matrix<Cmplx,3> M;
int dx[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir){
arg.gauge.load((Float*)(G.data), linkIndex(x,dx,X), dir, parity);
arg.oprod.load((Float*)(O.data), linkIndex(x,dx,X), dir, parity);
if(parity==0){
M = G*O;
}else{
M = -G*O;
}
Float sub = getTrace(M).y/(static_cast<Float>(3));
Float temp[10];
temp[0] = (M.data[1].x - M.data[3].x)*0.5;
temp[1] = (M.data[1].y + M.data[3].y)*0.5;
temp[2] = (M.data[2].x - M.data[6].x)*0.5;
temp[3] = (M.data[2].y + M.data[6].y)*0.5;
temp[4] = (M.data[5].x - M.data[7].x)*0.5;
temp[5] = (M.data[5].y + M.data[7].y)*0.5;
temp[6] = (M.data[0].y-sub);
temp[7] = (M.data[4].y-sub);
temp[8] = (M.data[8].y-sub);
temp[9] = 0.0;
arg.mom.save(temp, idx, dir, parity);
}
*/
}
template<typename Float, typename Result, typename Oprod, typename Gauge>
__global__ void computeKSLongLinkForceKernel(KSLongLinkArg<Result,Oprod,Gauge> arg)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx);
}
template<typename Float, typename Result, typename Oprod, typename Gauge>
void computeKSLongLinkForceCPU(KSLongLinkArg<Result,Oprod,Gauge>& arg)
{
for(int idx=0; idx<arg.threads; idx++){
computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx);
}
}
// should be tunable
template<typename Float, typename Result, typename Oprod, typename Gauge>
class KSLongLinkForce : Tunable {
KSLongLinkArg<Result,Oprod,Gauge> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
KSLongLinkForce(KSLongLinkArg<Result,Oprod,Gauge> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.res.stride);
}
virtual ~KSLongLinkForce() {}
void apply(const cudaStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
#if (__COMPUTE_CAPABILITY__ >= 200)
// Fix this
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1);
computeKSLongLinkForceKernel<Float><<<gridDim,blockDim>>>(arg);
#else
errorQuda("computeKSLongLinkForce not supported on pre-Fermi architecture");
#endif
}else{
computeKSLongLinkForceCPU<Float>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't print the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return 0; } // Fix this
long long bytes() const { return 0; } // Fix this
};
template<typename Float, typename Result, typename Oprod, typename Gauge>
void computeKSLongLinkForce(Result res, Oprod oprod, Gauge gauge, int dim[4], const GaugeField &meta, QudaFieldLocation location)
{
KSLongLinkArg<Result,Oprod,Gauge> arg(res, oprod, gauge, dim);
KSLongLinkForce<Float,Result,Oprod,Gauge> computeLongLink(arg,meta,location);
computeLongLink.apply(0);
cudaDeviceSynchronize();
}
template<typename Float>
void computeKSLongLinkForce(GaugeField& result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location)
{
if(location != QUDA_CUDA_FIELD_LOCATION){
errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported");
}else{
if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) ||
(result.Reconstruct() != QUDA_RECONSTRUCT_10)){
errorQuda("Reconstruct type not supported");
}else{
computeKSLongLinkForce<Float>(FloatNOrder<Float, 18, 2, 18>(result),
FloatNOrder<Float, 18, 2, 18>(oprod),
FloatNOrder<Float, 18, 2, 18>(gauge),
const_cast<int*>(result.X()),
gauge, location);
}
}
return;
}
void computeKSLongLinkForce(GaugeField &result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location)
{
if(result.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported");
}
if(result.Precision() == QUDA_SINGLE_PRECISION){
computeKSLongLinkForce<float>(result, oprod, gauge, location);
}else if(result.Precision() == QUDA_DOUBLE_PRECISION){
computeKSLongLinkForce<double>(result, oprod, gauge, location);
}else{
errorQuda("Precision %d not supported", result.Precision());
}
return;
}
} // namespace quda
|
4bd88a0db40a35ffd2ef1b723868c3f1fd2158ff.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
{
S val;
__host__ explicit AddScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a + val);
}
};
}
namespace cv { namespace cuda { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
device::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
device::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 4bd88a0db40a35ffd2ef1b723868c3f1fd2158ff.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
{
S val;
__host__ explicit AddScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a + val);
}
};
}
namespace cv { namespace cuda { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
device::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
device::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
892d0cc912c68715e71254801b40b7ba489db827.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime.h>
#define PRECISION_z
__global__ void
magma_zget_row_ptr_kernel(
const magma_int_t num_rows,
magma_int_t* nnz,
const magma_index_t* __restrict__ rowidx,
magma_index_t* rowptr)
{
//int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
//int nnz;
/*magma_int_t nnz_per_row;
if(k<num_rows){
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
nnz_per_row = __ldg( rowidx + k );
#else
nnz_per_row = rowidx[k];
#endif
atomicAdd(&nnz,nnz_per_row);
}
if (k < 2)
{
if(k==1)
{
rowptr[0] = 0;
rowptr[1] = rowidx[0];
for(int iter=2;iter<(num_rows+1)/2;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
}
else{
rowptr[num_rows] = nnz;
for(int iter=num_rows-1;iter>(num_rows+1)/2;iter--){
rowptr[iter] = rowptr[iter+1]-rowidx[iter];
}
}
}
*/
//naive implementation for now.
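//Worked example of the exclusive prefix sum performed below (an illustration, not taken from the original source):
//for num_rows = 3 with per-row counts rowidx = {2, 1, 3}, the single working thread produces the CSR row pointer
//rowptr = {0, 2, 3, 6} and reports nnz[0] = rowptr[3] = 6.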
if (k==1) {
rowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
nnz[0] = rowptr[num_rows];
}
} //kernel
extern "C" magma_int_t
magma_zget_row_ptr(
const magma_int_t num_rows,
magma_int_t *nnz,
const magma_index_t* rowidx,
magma_index_t* rowptr,
magma_queue_t queue)
{
/*
int blocksize = 128;
int gridsize = magma_ceildiv(num_rows, blocksize);
magma_int_t *nnz_dev, *tnnz;
magma_imalloc(&nnz_dev, 1);
magma_imalloc_cpu(&tnnz, 1);
dim3 block(blocksize,1,1);
dim3 grid(gridsize,1,1);
magma_zget_row_ptr_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(num_rows, nnz_dev, rowidx, rowptr);
magma_igetvector(1,nnz_dev,1,tnnz,1,queue);
*nnz = tnnz[0];
magma_free(nnz_dev);
magma_free_cpu(tnnz);
*/
magma_index_t *hrowidx, *hrowptr;
magma_index_malloc_cpu(&hrowidx, num_rows);
magma_index_malloc_cpu(&hrowptr, num_rows+1);
magma_index_getvector(num_rows,rowidx,1,hrowidx,1,queue);
hrowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
hrowptr[iter] = hrowptr[iter-1]+hrowidx[iter-1];
}
*nnz = hrowptr[num_rows];
magma_index_setvector(num_rows+1,hrowptr,1,rowptr,1,queue);
magma_free_cpu(hrowidx);
magma_free_cpu(hrowptr);
return MAGMA_SUCCESS;
}
| 892d0cc912c68715e71254801b40b7ba489db827.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include <cuda_runtime.h>
#define PRECISION_z
__global__ void
magma_zget_row_ptr_kernel(
const magma_int_t num_rows,
magma_int_t* nnz,
const magma_index_t* __restrict__ rowidx,
magma_index_t* rowptr)
{
//int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
//int nnz;
/*magma_int_t nnz_per_row;
if(k<num_rows){
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
nnz_per_row = __ldg( rowidx + k );
#else
nnz_per_row = rowidx[k];
#endif
atomicAdd(&nnz,nnz_per_row);
}
if (k < 2)
{
if(k==1)
{
rowptr[0] = 0;
rowptr[1] = rowidx[0];
for(int iter=2;iter<(num_rows+1)/2;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
}
else{
rowptr[num_rows] = nnz;
for(int iter=num_rows-1;iter>(num_rows+1)/2;iter--){
rowptr[iter] = rowptr[iter+1]-rowidx[iter];
}
}
}
*/
//naive implementation for now.
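//Worked example of the exclusive prefix sum performed below (an illustration, not taken from the original source):
//for num_rows = 3 with per-row counts rowidx = {2, 1, 3}, the single working thread produces the CSR row pointer
//rowptr = {0, 2, 3, 6} and reports nnz[0] = rowptr[3] = 6.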
if (k==1) {
rowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
nnz[0] = rowptr[num_rows];
}
} //kernel
extern "C" magma_int_t
magma_zget_row_ptr(
const magma_int_t num_rows,
magma_int_t *nnz,
const magma_index_t* rowidx,
magma_index_t* rowptr,
magma_queue_t queue)
{
/*
int blocksize = 128;
int gridsize = magma_ceildiv(num_rows, blocksize);
magma_int_t *nnz_dev, *tnnz;
magma_imalloc(&nnz_dev, 1);
magma_imalloc_cpu(&tnnz, 1);
dim3 block(blocksize,1,1);
dim3 grid(gridsize,1,1);
magma_zget_row_ptr_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(num_rows, nnz_dev, rowidx, rowptr);
magma_igetvector(1,nnz_dev,1,tnnz,1,queue);
*nnz = tnnz[0];
magma_free(nnz_dev);
magma_free_cpu(tnnz);
*/
magma_index_t *hrowidx, *hrowptr;
magma_index_malloc_cpu(&hrowidx, num_rows);
magma_index_malloc_cpu(&hrowptr, num_rows+1);
magma_index_getvector(num_rows,rowidx,1,hrowidx,1,queue);
hrowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
hrowptr[iter] = hrowptr[iter-1]+hrowidx[iter-1];
}
*nnz = hrowptr[num_rows];
magma_index_setvector(num_rows+1,hrowptr,1,rowptr,1,queue);
magma_free_cpu(hrowidx);
magma_free_cpu(hrowptr);
return MAGMA_SUCCESS;
}
|
6281d9efeabb025af4995788c1c7ab2056b756cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70% and on the test data set is 99.14%.
//This model implements the following layers in order- 2DConvolution---->Maxpooling---->2D Convolution---->Maxpooling---->Fully_connected layer---->Fully_connected layer.
//The image is a 28*28 greyscale image. The specifications of the layers are as follows:
//Layer_0: Convolution: 32 3*3 kernels with no padding and 1 stride.
//Layer_1: Maxpooling: 2*2 filters with no padding and 1 stride.
//Layer_2: Convolution: 64 3*3 kernels with no padding and 1 stride.
//Layer_3: Maxpooling: 2*2 filters with no padding and 1 stride.
//Layer_4: Flattening
//Layer_5: Fully connected / dense layer with 1024 output units.
//Layer_6: Dropout (done during training only).
//Layer_7: Fully connected / dense layer with 10 output units.
//All arrays and matrices are designed to be row ordered in this implementation.
//Kernel that does convolution. Each thread identifies the patch of the input image it is responsible for and multiply-accumulates that patch's values with the corresponding kernel.
//The depth of the output image is the number of kernels.
//Kernel that does maxpooling.
//This kernel implements the fully connected layers.
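//A hedged host-side launch sketch for convolution_kernel (the host code, and the buffer names gpu_in, gpu_out, kernel_weights and kernel_biases, are not part of this file and are only assumptions). For layer 0 the 28*28*1 input and the 3*3 kernels give a 26*26*32 output, with one thread per output element:
//  dim3 block(13, 13, 1);   // 169 threads per block
//  dim3 grid(2, 2, 32);     // 2*13 = 26 covers each spatial dimension, z covers the 32 kernels
//  hipLaunchKernelGGL((convolution_kernel), grid, block, 0, 0, 28, 28, 1, gpu_in, 3, 3, 1, kernel_weights, kernel_biases, 32, 26, 26, 32, gpu_out);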
__global__ void convolution_kernel(int h, int w, int d, double* gpu_in, int k_h, int k_w, int k_d, double* kernel_weights, double* kernel_biases, int num_kernels, int op_h, int op_w, int op_d, double* gpu_out)
{
//Identifying threads by their IDs.
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int deep = blockDim.z *blockIdx.z + threadIdx.z;
//Return if thread out of bounds
if (row >= op_h || col >= op_w || deep >= op_d) return;
double out=0.0;
int kernel_pointer = 0;
//Each thread/each output node identifies the corresponding element in the matrix that it is responsible to multiply-add.
for (int depth_pointer = 0; depth_pointer < k_d; depth_pointer++) {
for (int row_pointer = 0; row_pointer < k_h; row_pointer++) {
for (int column_pointer = 0; column_pointer < k_w; column_pointer++) {
out += gpu_in[((row*w + col) + row_pointer * w + column_pointer + h * w*depth_pointer)] * kernel_weights[kernel_pointer + deep * k_h*k_w*k_d];
kernel_pointer++;
}
}
}
//Bias addition and relu activation. One bias is applied to one output image layer, since one bias is applicable to one kernel.
//Relu activation : relu(a)=max(0,a). If the value is less than 0 then it becomes 0, else it is retained.
if (out + kernel_biases[deep] < 0.0)
gpu_out[row*op_w + col + deep * op_h*op_w] = 0.0l;
else
gpu_out[row*op_w + col + deep * op_h*op_w] = out + kernel_biases[deep];
} | 6281d9efeabb025af4995788c1c7ab2056b756cd.cu | #include "includes.h"
//Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70% and on the test data set is 99.14%.
//This model implements the following layers in order- 2DConvolution---->Maxpooling---->2D Convolution---->Maxpooling---->Fully_connected layer---->Fully_connected layer.
//The image is a 28*28 greyscale image. The specifications of the layers are as follows:
//Layer_0: Convolution: 32 3*3 kernels with no padding and 1 stride.
//Layer_1: Maxpooling: 2*2 filters with no padding and 1 stride.
//Layer_2: Convolution: 64 3*3 kernels with no padding and 1 stride.
//Layer_3: Maxpooling: 2*2 filters with no padding and 1 stride.
//Layer_4: Flattening
//Layer_5: Fully connected / dense layer with 1024 output units.
//Layer_6: Dropout (done during training only).
//Layer_7: Fully connected / dense layer with 10 output units.
//All arrays and matrices are designed to be row ordered in this implementation.
//Kernel that does convolution. Each thread identifies the patch of the input image it is responsible for and multiply-accumulates that patch's values with the corresponding kernel.
//The depth of the output image is the number of kernels.
//Kernel that does maxpooling.
//This kernel implements the fully connected layers.
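//A hedged host-side launch sketch for convolution_kernel (the host code, and the buffer names gpu_in, gpu_out, kernel_weights and kernel_biases, are not part of this file and are only assumptions). For layer 0 the 28*28*1 input and the 3*3 kernels give a 26*26*32 output, with one thread per output element:
//  dim3 block(13, 13, 1);   // 169 threads per block
//  dim3 grid(2, 2, 32);     // 2*13 = 26 covers each spatial dimension, z covers the 32 kernels
//  convolution_kernel<<<grid, block>>>(28, 28, 1, gpu_in, 3, 3, 1, kernel_weights, kernel_biases, 32, 26, 26, 32, gpu_out);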
__global__ void convolution_kernel(int h, int w, int d, double* gpu_in, int k_h, int k_w, int k_d, double* kernel_weights, double* kernel_biases, int num_kernels, int op_h, int op_w, int op_d, double* gpu_out)
{
//Identifying threads by their IDs.
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int deep = blockDim.z *blockIdx.z + threadIdx.z;
//Return if thread out of bounds
if (row >= op_h || col >= op_w || deep >= op_d) return;
double out=0.0;
int kernel_pointer = 0;
//Each thread/each output node identifies the corresponding element in the matrix that it is responsible to multiply-add.
for (int depth_pointer = 0; depth_pointer < k_d; depth_pointer++) {
for (int row_pointer = 0; row_pointer < k_h; row_pointer++) {
for (int column_pointer = 0; column_pointer < k_w; column_pointer++) {
out += gpu_in[((row*w + col) + row_pointer * w + column_pointer + h * w*depth_pointer)] * kernel_weights[kernel_pointer + deep * k_h*k_w*k_d];
kernel_pointer++;
}
}
}
//Bias addition and relu activation. One bias is applied to one output image layer, since one bias is applicable to one kernel.
//Relu activation : relu(a)=max(0,a). If the value is less than 0 then it becomes 0, else it is retained.
if (out + kernel_biases[deep] < 0.0)
gpu_out[row*op_w + col + deep * op_h*op_w] = 0.0l;
else
gpu_out[row*op_w + col + deep * op_h*op_w] = out + kernel_biases[deep];
} |
a6b5df331827a13ebfc2d2bd71bc7bfc5e7a8ca2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h> /* log2(), pow() */
#include <cstdint> /* uint64_t */
#include <cstdlib> /* malloc() */
#include <iostream>
#include "../include/utils2.h"
#include "../include/utils.h"
/* bit_reverse(), modExp(), modulo() */
#include "../include/ntt.cuh" //INCLUDE HEADER FILE
#include "../include/utils_device.cuh"
#include "../include/cuda_device.cuh"
/**
* Perform an in-place iterative breadth-first decimation-in-time Cooley-Tukey NTT on an input vector and return the result
*
* @param vec The input vector to be transformed
* @param n The size of the input vector
* @param p The prime to be used as the modulus of the transformation
* @param r The primitive root of the prime
* @param rev Whether to perform bit reversal on the input vector
* @return The transformed vector
*/
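// A hedged usage sketch (the calling host program and any concrete values of n, p and r are not part of this file):
// n must be a power of two no larger than 4096, since the twiddle table built below holds 2048 entries per stage and
// is indexed with k < m/2 <= n/2; p should be a prime of the form c*n + 1 and r a primitive root modulo p, e.g.
//   uint64_t *out = inPlaceNTT_DIT_stepC(vec, batch_size, n, p, r, true);
// Internally ak_table[(i-1)*2048 + k] holds w^k mod p with w = r^((p-1)/2^i), the stage-i twiddle factor used by
// each butterfly in ntt_cuda_kernel_stepC.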
using namespace std;
__global__ void ntt_cuda_kernel_stepC(uint64_t *g_idata, int offset,int num_bits,uint64_t *table ,uint64_t *n, uint64_t *p, bool rev, uint64_t *g_odata)
{
uint64_t m, factor1, factor2;
//set thread ID
uint64_t tid = threadIdx.x;
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
//boundary check
if (tid >= *n || idx >= *n)return;
if (rev)
{
uint64_t reverse_num= 0;
for(uint64_t j = 0; j < num_bits; j++){
reverse_num = reverse_num << 1;
if(idx & (1 << j)){
reverse_num = reverse_num | 1;
}
}
g_odata[offset * *n + reverse_num] = g_idata[offset * *n +idx];
}
else
{
g_odata[offset * *n +idx] = g_idata[offset * *n +idx];
}
__syncthreads();
if (idx == 0)
{
for (uint64_t i = 1; i <= num_bits; i++)
{
m = pow_D(uint64_t(2), i);
for (uint64_t j = 0; j < *n; j += m)
{
for (uint64_t k = 0; k < m / 2; k++)
{
factor1 = g_odata[offset * *n +j + k];
factor2 = modulo_D(uint64_t(table[(i-1)*2048+k])*uint64_t(g_odata[offset * *n +j + k + m / 2]), *p);
g_odata[offset * *n +j + k] = modulo_D(factor1 + factor2, *p);
g_odata[offset * *n +j + k + m / 2] = modulo_D(factor1 - factor2, *p);
}
}
}
}
}
extern "C"
uint64_t *inPlaceNTT_DIT_stepC(uint64_t **vec, uint64_t batch_size,uint64_t n, uint64_t p, uint64_t r, bool rev)
{
int blocksize = 1024;
dim3 block(blocksize, 1);
dim3 grid((n - 1) / block.x + 1, 1);
//var init
size_t bytes = n * batch_size* sizeof(uint64_t);
uint64_t *vec_host = (uint64_t *)malloc(bytes);
uint64_t *outVec_host = (uint64_t *)malloc(bytes); //grid.x * sizeof(uint64_t));
for (int i=0;i<batch_size;i++){
memcpy(&vec_host[i*n],vec[i],n * sizeof(uint64_t));
}
// device memory declare
uint64_t *vec_dev = NULL;
uint64_t *outVec_dev = NULL;
//device memory allocate
CHECK(hipMalloc((void **)&vec_dev, bytes));
CHECK(hipMalloc((void **)&outVec_dev, bytes));
//remove bitreversal
uint64_t num_bits = log2(n);
uint64_t a_table [32];
int i,j;
for (i=1;i<=32;i++){
a_table[i-1] = modExp(r,(p-1)/pow(2,i),p);
}
uint64_t ak_table [65536] ;
for (i=0;i<32;i++){
for (j=0;j<2048;j++){
ak_table[i*2048+j] = modExp(a_table[i],j,p);
}
}
uint64_t *ak_table_dev =NULL;
uint64_t *n_dev =NULL;
uint64_t *p_dev =NULL;
CHECK(hipMalloc((void **)&ak_table_dev, sizeof(ak_table)));
CHECK(hipMalloc((void **)&n_dev, sizeof(n)));
CHECK(hipMalloc((void **)&p_dev, sizeof(p)));
CHECK(hipMemcpy(ak_table_dev, ak_table, sizeof(ak_table), hipMemcpyHostToDevice));
CHECK(hipMemcpy(n_dev, &n, sizeof(n), hipMemcpyHostToDevice));
CHECK(hipMemcpy(p_dev, &p, sizeof(p), hipMemcpyHostToDevice));
CHECK(hipMemset(vec_dev,0,bytes));
CHECK(hipMemset(outVec_dev,0,bytes));
CHECK(hipMemcpy(vec_dev, vec_host, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
for (int offset = 0;offset<batch_size;offset++){
hipLaunchKernelGGL(( ntt_cuda_kernel_stepC), dim3(grid), dim3(block), 0, 0, vec_dev,offset,num_bits,ak_table_dev,n_dev, p_dev,rev, outVec_dev);
}
CHECK(hipDeviceSynchronize());
CHECK(hipMemcpy(outVec_host, outVec_dev, bytes, hipMemcpyDeviceToHost));
CHECK(hipFree(vec_dev));
CHECK(hipFree(ak_table_dev));
CHECK(hipFree(n_dev));
CHECK(hipFree(p_dev));
free(vec_host);
CHECK(hipFree(outVec_dev));
return outVec_host;
}
| a6b5df331827a13ebfc2d2bd71bc7bfc5e7a8ca2.cu | #include <cuda_runtime.h>
#include <math.h> /* log2(), pow() */
#include <cstdint> /* uint64_t */
#include <cstdlib> /* malloc() */
#include <iostream>
#include "../include/utils2.h"
#include "../include/utils.h"
/* bit_reverse(), modExp(), modulo() */
#include "../include/ntt.cuh" //INCLUDE HEADER FILE
#include "../include/utils_device.cuh"
#include "../include/cuda_device.cuh"
/**
* Perform an in-place iterative breadth-first decimation-in-time Cooley-Tukey NTT on an input vector and return the result
*
* @param vec The input vector to be transformed
* @param n The size of the input vector
* @param p The prime to be used as the modulus of the transformation
* @param r The primitive root of the prime
* @param rev Whether to perform bit reversal on the input vector
* @return The transformed vector
*/
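// A hedged usage sketch (the calling host program and any concrete values of n, p and r are not part of this file):
// n must be a power of two no larger than 4096, since the twiddle table built below holds 2048 entries per stage and
// is indexed with k < m/2 <= n/2; p should be a prime of the form c*n + 1 and r a primitive root modulo p, e.g.
//   uint64_t *out = inPlaceNTT_DIT_stepC(vec, batch_size, n, p, r, true);
// Internally ak_table[(i-1)*2048 + k] holds w^k mod p with w = r^((p-1)/2^i), the stage-i twiddle factor used by
// each butterfly in ntt_cuda_kernel_stepC.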
using namespace std;
__global__ void ntt_cuda_kernel_stepC(uint64_t *g_idata, int offset,int num_bits,uint64_t *table ,uint64_t *n, uint64_t *p, bool rev, uint64_t *g_odata)
{
uint64_t m, factor1, factor2;
//set thread ID
uint64_t tid = threadIdx.x;
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
//boundary check
if (tid >= *n || idx >= *n)return;
if (rev)
{
uint64_t reverse_num= 0;
for(uint64_t j = 0; j < num_bits; j++){
reverse_num = reverse_num << 1;
if(idx & (1 << j)){
reverse_num = reverse_num | 1;
}
}
g_odata[offset * *n + reverse_num] = g_idata[offset * *n +idx];
}
else
{
g_odata[offset * *n +idx] = g_idata[offset * *n +idx];
}
__syncthreads();
if (idx == 0)
{
for (uint64_t i = 1; i <= num_bits; i++)
{
m = pow_D(uint64_t(2), i);
for (uint64_t j = 0; j < *n; j += m)
{
for (uint64_t k = 0; k < m / 2; k++)
{
factor1 = g_odata[offset * *n +j + k];
factor2 = modulo_D(uint64_t(table[(i-1)*2048+k])*uint64_t(g_odata[offset * *n +j + k + m / 2]), *p);
g_odata[offset * *n +j + k] = modulo_D(factor1 + factor2, *p);
g_odata[offset * *n +j + k + m / 2] = modulo_D(factor1 - factor2, *p);
}
}
}
}
}
extern "C"
uint64_t *inPlaceNTT_DIT_stepC(uint64_t **vec, uint64_t batch_size,uint64_t n, uint64_t p, uint64_t r, bool rev)
{
int blocksize = 1024;
dim3 block(blocksize, 1);
dim3 grid((n - 1) / block.x + 1, 1);
//var init
size_t bytes = n * batch_size* sizeof(uint64_t);
uint64_t *vec_host = (uint64_t *)malloc(bytes);
uint64_t *outVec_host = (uint64_t *)malloc(bytes); //grid.x * sizeof(uint64_t));
for (int i=0;i<batch_size;i++){
memcpy(&vec_host[i*n],vec[i],n * sizeof(uint64_t));
}
// device memory declare
uint64_t *vec_dev = NULL;
uint64_t *outVec_dev = NULL;
//device memory allocate
CHECK(cudaMalloc((void **)&vec_dev, bytes));
CHECK(cudaMalloc((void **)&outVec_dev, bytes));
//remove bitreversal
uint64_t num_bits = log2(n);
uint64_t a_table [32];
int i,j;
for (i=1;i<=32;i++){
a_table[i-1] = modExp(r,(p-1)/pow(2,i),p);
}
uint64_t ak_table [65536] ;
for (i=0;i<32;i++){
for (j=0;j<2048;j++){
ak_table[i*2048+j] = modExp(a_table[i],j,p);
}
}
uint64_t *ak_table_dev =NULL;
uint64_t *n_dev =NULL;
uint64_t *p_dev =NULL;
CHECK(cudaMalloc((void **)&ak_table_dev, sizeof(ak_table)));
CHECK(cudaMalloc((void **)&n_dev, sizeof(n)));
CHECK(cudaMalloc((void **)&p_dev, sizeof(p)));
CHECK(cudaMemcpy(ak_table_dev, ak_table, sizeof(ak_table), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(n_dev, &n, sizeof(n), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(p_dev, &p, sizeof(p), cudaMemcpyHostToDevice));
CHECK(cudaMemset(vec_dev,0,bytes));
CHECK(cudaMemset(outVec_dev,0,bytes));
CHECK(cudaMemcpy(vec_dev, vec_host, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
for (int offset = 0;offset<batch_size;offset++){
ntt_cuda_kernel_stepC<<<grid, block>>>(vec_dev,offset,num_bits,ak_table_dev,n_dev, p_dev,rev, outVec_dev);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(outVec_host, outVec_dev, bytes, cudaMemcpyDeviceToHost));
CHECK(cudaFree(vec_dev));
CHECK(cudaFree(ak_table_dev));
CHECK(cudaFree(n_dev));
CHECK(cudaFree(p_dev));
free(vec_host);
CHECK(cudaFree(outVec_dev));
return outVec_host;
}
|
855d708f4406f716a747741074ee339a05f9ab28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define THREADS_PER_BLOCK 512
#define N (2048*2048)
__global__ void add(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < N)
c[index] = a[index] + b[index];
printf("index: %d\n",threadIdx.x);
}
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
hipMemcpy(dev_a, a, size , hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK) , dim3(THREADS_PER_BLOCK) , 0, 0, dev_a,dev_b,dev_c);
//copy device result back to host copy of c
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
/*for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}*/
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 855d708f4406f716a747741074ee339a05f9ab28.cu | #include "stdio.h"
#define THREADS_PER_BLOCK 512
#define N (2048*2048)
__global__ void add(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < N)
c[index] = a[index] + b[index];
printf("index: %d\n",threadIdx.x);
}
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
cudaMemcpy(dev_a, a, size , cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<<N/THREADS_PER_BLOCK , THREADS_PER_BLOCK >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
/*for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}*/
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
2b9e160d815f076dd15e8ad1dc4b0403c0a2bd79.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#define BDIMX 32
#define BDIMY 32
#define PAD 1
// function for checking the CUDA runtime API results.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
void printData(char *msg, int *in, const int size)
{
printf_s("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf_s("%5d", in[i]);
}
printf_s("\n");
return;
}
__global__ void setColReadRowPad(int *out)
{
// statically allocate shared memory
__shared__ int tile[BDIMX][BDIMY + PAD];
// global memory index
int idx = threadIdx.y * blockDim.x + threadIdx.x;
// store to shared memory by column; the extra PAD column changes the row stride so these stores are free of bank conflicts
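// worked bank arithmetic (assuming the common 32 banks of 4-byte words, which this example does not state):
// tile[x][y] lives at word offset (BDIMY + PAD)*x + y = 33*x + y, so a warp with y fixed and x = 0..31
// touches banks (33*x + y) % 32 = (x + y) % 32 -- all 32 distinct. Without the PAD column the stride is
// 32 words and every thread of the warp would map to bank y, a 32-way conflict.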
tile[threadIdx.x][threadIdx.y] = idx;
// wait for all threads to finish
__syncthreads();
// read shared memory by row (bank-conflict free) and write the result back to global memory with coalesced stores
out[idx] = tile[threadIdx.y][threadIdx.x];
}
int main(int argc, char **argv)
{
int dev = 0;
hipDeviceProp_t deviceProp;
// get device information
checkCuda(hipGetDeviceProperties(&deviceProp, dev));
printf_s("> %s starting on ", argv[0]);
printf("device %d: %s\n\n", dev, deviceProp.name);
checkCuda(hipSetDevice(dev));
bool iprintf = 0;
if (argc > 1) iprintf = atoi(argv[1]);
// define matrix 32x32 = 1024 total elements
int nx = BDIMX;
int ny = BDIMY;
size_t nBytes = nx * ny * sizeof(int);
// allocate memory
int *d_in;
checkCuda(hipMalloc(&d_in, nBytes));
int *gpuRef = (int *)malloc(nBytes);
// define kernel configuration
dim3 block(BDIMX, BDIMY);
dim3 grid(1, 1);
printf_s("> setColReadRowPad <<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);
hipLaunchKernelGGL(( setColReadRowPad) , dim3(grid), dim3(block), 0, 0, d_in);
checkCuda(hipMemcpy(gpuRef, d_in, nBytes, hipMemcpyDeviceToHost));
if (iprintf) printData("set column read row", gpuRef, nx * ny);
// free memory
checkCuda(hipFree(d_in));
free(gpuRef);
// reset device
checkCuda(hipDeviceReset());
return EXIT_SUCCESS;
} | 2b9e160d815f076dd15e8ad1dc4b0403c0a2bd79.cu | #include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#define BDIMX 32
#define BDIMY 32
#define PAD 1
// function for checking the CUDA runtime API results.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
void printData(char *msg, int *in, const int size)
{
printf_s("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf_s("%5d", in[i]);
}
printf_s("\n");
return;
}
__global__ void setColReadRowPad(int *out)
{
// statically allocate shared memory
__shared__ int tile[BDIMX][BDIMY + PAD];
// global memory index
int idx = threadIdx.y * blockDim.x + threadIdx.x;
// store to shared memory by column; the extra PAD column changes the row stride so these stores are free of bank conflicts
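// worked bank arithmetic (assuming the common 32 banks of 4-byte words, which this example does not state):
// tile[x][y] lives at word offset (BDIMY + PAD)*x + y = 33*x + y, so a warp with y fixed and x = 0..31
// touches banks (33*x + y) % 32 = (x + y) % 32 -- all 32 distinct. Without the PAD column the stride is
// 32 words and every thread of the warp would map to bank y, a 32-way conflict.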
tile[threadIdx.x][threadIdx.y] = idx;
// wait for all threads to finish
__syncthreads();
// read shared memory by row (bank-conflict free) and write the result back to global memory with coalesced stores
out[idx] = tile[threadIdx.y][threadIdx.x];
}
int main(int argc, char **argv)
{
int dev = 0;
cudaDeviceProp deviceProp;
// get device information
checkCuda(cudaGetDeviceProperties(&deviceProp, dev));
printf_s("> %s starting on ", argv[0]);
printf("device %d: %s\n\n", dev, deviceProp.name);
checkCuda(cudaSetDevice(dev));
bool iprintf = 0;
if (argc > 1) iprintf = atoi(argv[1]);
// define matrix 32x32 = 1024 total elements
int nx = BDIMX;
int ny = BDIMY;
size_t nBytes = nx * ny * sizeof(int);
// allocate memory
int *d_in;
checkCuda(cudaMalloc(&d_in, nBytes));
int *gpuRef = (int *)malloc(nBytes);
// define kernel configuration
dim3 block(BDIMX, BDIMY);
dim3 grid(1, 1);
printf_s("> setColReadRowPad <<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);
setColReadRowPad <<<grid, block>>> (d_in);
checkCuda(cudaMemcpy(gpuRef, d_in, nBytes, cudaMemcpyDeviceToHost));
if (iprintf) printData("set column read row", gpuRef, nx * ny);
// free memory
checkCuda(cudaFree(d_in));
free(gpuRef);
// reset device
checkCuda(cudaDeviceReset());
return EXIT_SUCCESS;
} |
5cb27d126ed39a90f04f8b55a5bedfcaafcd8f05.hip | // !!! This is a file automatically generated by hipify!!!
// 13659-bus system
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
//#include "sm_12_atomic_functions.h"
//#include "sm_13_double_functions.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <conio.h>
#include <time.h>
#include <windows.h>
#include <iostream>
#include <assert.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "nicslu.h"
//#include "Utilities.cuh"
#define M_PI 3.14159265358979
//__shared__ double G[13659*13659],B[13659*13659];
__global__ void formYKernel(double *G,double *B,int *lineN,int *from,int *to,double *r,double *x,double *c,double *tr,double *g1,double *b1){
int i = blockIdx.x * blockDim.x + threadIdx.x;
//// printf("%d %d ",i,j);
//// int N=13659;
//// double trmax;
//// double gij=r[N*i+j]/(r[N*i+j]*r[N*i+j]+x[N*i+j]*x[N*i+j]);
//// double bij=-x[N*i+j]/(r[N*i+j]*r[N*i+j]+x[N*i+j]*x[N*i+j]);
//// if(i!=j){
//// if (tr[N*i+j]>tr[N*i+j]) trmax=tr[N*i+j];
//// else trmax=tr[N*i+j];
////// printf("%f ",trmax);
//// G[N*i+j]=-trmax*gij;
//// B[N*i+j]=-trmax*bij;
//// }
// int N=13659;
// if(i<N*N){
//// if(i==2) printf("%f ",gi);
// if((i-i/N)/N==i/N){//
// double gi=r[i]/(r[i]*r[i]+x[i]*x[i]);
// double bi=-x[i]/(r[i]*r[i]+x[i]*x[i]);
// if((i-i/N) % N !=0){//
// G[i]=-tr[i]*gi;
//B[i]=-tr[i]*bi;
// }
// else{//
// double cntg=0,cntb=0;
// int j=i/N;//j
// for(int k=0;k<N;k++){
// double trdirec;
// if (trsign[N*j+k]) trdirec=tr[N*j+k];
// else trdirec=1.0;
// cntg=cntg+trdirec*trdirec*(r[N*j+k]/(r[N*j+k]*r[N*j+k]+x[N*j+k]*x[N*j+k]));
// cntb=cntb+trdirec*trdirec*(-x[N*j+k]/(r[N*j+k]*r[N*j+k]+x[N*j+k]*x[N*j+k])+0.5*c[N*j+k]);
// }
// G[i]=cntg+g1[j];
// B[i]=cntb+b1[j];
// }
// }
// else {
// G[i]=0;B[i]=0;
// }
// }
int N=13659,j;
double cntg,cntb;
if(i<*lineN){// first lineN threads: off-diagonal (branch) admittance entries
//if(from[i]<to[i]){//
// G[from[i]*N+to[i]]=-tr[i]*(r[i]/(r[i]*r[i]+x[i]*x[i]));
// B[from[i]*N+to[i]]=-tr[i]*(-x[i]/(r[i]*r[i]+x[i]*x[i]));
//}else{
// G[to[i]*N+from[i]]=-tr[i]*(r[i]/(r[i]*r[i]+x[i]*x[i]));
// B[to[i]*N+from[i]]=-tr[i]*(-x[i]/(r[i]*r[i]+x[i]*x[i]));
//}
G[i]=-(r[i]/(r[i]*r[i]+x[i]*x[i]))/tr[i];
B[i]=-(-x[i]/(r[i]*r[i]+x[i]*x[i]))/tr[i];
}
else
if(i<*lineN+N){// next N threads: diagonal (bus self-admittance) entries
j=i-*lineN;
cntg=0;cntb=0;
for(int k=0;k<*lineN;k++){
if(from[k]==j){
cntg=cntg+(r[k]/(r[k]*r[k]+x[k]*x[k]))/(tr[k]*tr[k]);
cntb=cntb+(-x[k]/(r[k]*r[k]+x[k]*x[k])+0.5*c[k])/(tr[k]*tr[k]);
}
if(to[k]==j){
cntg=cntg+r[k]/(r[k]*r[k]+x[k]*x[k]);
cntb=cntb-x[k]/(r[k]*r[k]+x[k]*x[k])+0.5*c[k];
}
}
G[i]=cntg+g1[j];
B[i]=cntb+b1[j];
}
}
//double findmax(double *a){
// double maxa=0.0;
// for(int i=0;i<n;i++){
// if (a[i]>maxa)
// maxa=a[i];
// }
// return(maxa);
//}
//#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
//#else
__device__ double MyatomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
//#endif
__global__ void calc_cntI(double *cntI,int *lineN,int *from,int*to,double *G,double *B,double *V,double *angle,int *NodetoFuncP,int *type)
{
//double deltat=0.5;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
//double Vj,Vi;
// double *fxplus,*fxminus;
//x[*k]=x[*k]+deltaq;
long int i = blockIdx.x * blockDim.x + threadIdx.x;
double deltaP,deltaQ;
if(i<*lineN){
if(type[from[i]]!=1){
deltaP=V[to[i]]*(G[i]*cos(angle[from[i]]-angle[to[i]])+B[i]*sin(angle[from[i]]-angle[to[i]]));
MyatomicAdd(&cntI[NodetoFuncP[from[i]]],deltaP);
deltaQ=V[to[i]]*(G[i]*sin(angle[from[i]]-angle[to[i]])-B[i]*cos(angle[from[i]]-angle[to[i]]));
MyatomicAdd(&cntI[NodetoFuncP[from[i]]+n],deltaQ);
}
if(type[to[i]]!=1){
deltaP=V[from[i]]*(G[i]*cos(angle[to[i]]-angle[from[i]])+B[i]*sin(angle[to[i]]-angle[from[i]]));
MyatomicAdd(&cntI[NodetoFuncP[to[i]]],deltaP);
deltaQ=V[from[i]]*(G[i]*sin(angle[to[i]]-angle[from[i]])-B[i]*cos(angle[to[i]]-angle[from[i]]));
MyatomicAdd(&cntI[NodetoFuncP[to[i]]+n],deltaQ);
}
}
else if(i<*lineN+N){
int j=i-(*lineN);
if(type[j]!=1){
MyatomicAdd(&cntI[NodetoFuncP[j]],V[j]*G[i]);
MyatomicAdd(&cntI[NodetoFuncP[j]+n],-V[j]*B[i]);
}
//if(NodetoFuncP[j]==1) printf("%f %f",V[j]*G[i],cntI[NodetoFuncP[j]]);
}
}
__global__ void calc_PQ(double *Ptot,double *Qtot,double *V,double *cntI,int *FunctoNode){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
if(i<N-1)
Ptot[i]=V[FunctoNode[i]]*cntI[i];
else if(i<2*(N-1))
Qtot[i-N+1]=V[FunctoNode[i-N+1]]*cntI[i];
}
__global__ void calc_pf(double *pf,double *Ptot,double *Qtot,int *FunctoNode,int *NodetoFuncP,double *Pg,double *Qg,double *Pl,double *Ql){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
if(i<n){
int node=FunctoNode[i];
pf[i]=-(Pg[node]-Pl[node]-Ptot[i]);
}
else if(i<nfunc){
int node=FunctoNode[i];
pf[i]=-(Qg[node]-Ql[node]-Qtot[NodetoFuncP[node]]);
}
}
__global__ void changeVAng1(double *V,double *Ang,int *FunctoNode,double *deltat,double *fx1){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
//printf(" %f %f\n",*deltat,fx1[0]);
if(i<N-1)
Ang[FunctoNode[i]]+=(*deltat)*fx1[i];
else if(i<nfunc)
V[FunctoNode[i]]+=(*deltat)*fx1[i]*V[FunctoNode[i]];
//if(i==38)
// printf("angle[%d]=%f\n",i,V[nodeV[i]]);
}
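// changeVAng2: corrector step - backs out the predictor and applies a
// trapezoidal (Heun-type) update 0.5*dt*(fx1+fx2) to the angles, with an
// analogous relative update for the PQ-bus voltage magnitudes.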
__global__ void changeVAng2(double *V,double *Ang,int *FunctoNode,double *deltat,double *fx1,double *fx2){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
if(i<N-1)
Ang[FunctoNode[i]]=Ang[FunctoNode[i]]-(*deltat)*fx1[i]+0.5*(*deltat)*(fx2[i]+fx1[i]);
// Ang[FunctoNode[i]]=Ang[FunctoNode[i]]+0.5*(*deltat)*(fx2[i]+fx1[i]);
else if(i<nfunc)
V[FunctoNode[i]]=V[FunctoNode[i]]-(*deltat)*fx1[i]+(V[FunctoNode[i]]-(*deltat)*fx1[i])*0.5*(*deltat)*(fx2[i]+fx1[i]);
// V[FunctoNode[i]]=V[FunctoNode[i]]+(V[FunctoNode[i]]-(*deltat)*fx1[i])*0.5*(*deltat)*(fx2[i]+fx1[i]);
//-(*deltat)*fx1[i]
}
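// mergeY: merge parallel branches. If two lines join the same pair of buses,
// their admittances are summed and the branch arrays are compacted; the new
// branch count is returned.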
int mergeY(int *from,int *to,double *G,double *B,int lineN,int N){
int i=0;
while (i<lineN){
for(int j=0;j<i;j++){
if(((from[i]==from[j])&&(to[i]==to[j]))||((from[i]==to[j])&&(to[i]==from[j]))){
G[j]+=G[i];
B[j]+=B[i];
for(int k=i;k<lineN-1;k++){
from[k]=from[k+1];
to[k]=to[k+1];
G[k]=G[k+1];
B[k]=B[k+1];
}
for(int k=lineN-1;k<lineN+N-1;k++){
G[k]=G[k+1];
B[k]=B[k+1];
}
lineN--;
i--;
}
}
i++;
}
return lineN;
}
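// formJ: assemble the polar power-flow Jacobian in COO form (Ji, Jj, J).
// Branch entries fill the off-diagonal H/N/M/L blocks, the diagonal entries use
// the stored self-admittances together with the latest P and Q, and off-diagonal
// entries smaller than 1e-9 in magnitude are dropped. Returns the nonzero count.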
int formJ(int *Ji,int *Jj,double *J,int *from,int *to,double *G,double *B,double *V,double *ang,double *P,double *Q,int n,int r,int lineN,int *NodetoFuncP,int *NodetoFuncQ,int *FunctoNode,int *type){
int nnzJ=-1;
double value;
for(int i=0;i<lineN;i++){
if((type[from[i]]!=1)&&(type[to[i]]!=1)){
//H
value=V[from[i]]*(B[i]*cos(ang[from[i]]-ang[to[i]])-G[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[from[i]];
Jj[nnzJ]=NodetoFuncP[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
// printf("//");
}
value=V[to[i]]*(B[i]*cos(ang[to[i]]-ang[from[i]])-G[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[to[i]];
Jj[nnzJ]=NodetoFuncP[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
// printf("//");
}
//L
if((type[from[i]]==3)&&(type[to[i]]==3)){
value=V[from[i]]*(B[i]*cos(ang[from[i]]-ang[to[i]])-G[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[from[i]];
Jj[nnzJ]=NodetoFuncQ[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
value=V[to[i]]*(B[i]*cos(ang[to[i]]-ang[from[i]])-G[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[to[i]];
Jj[nnzJ]=NodetoFuncQ[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
//N
if(type[to[i]]==3){
value=V[from[i]]*(-G[i]*cos(ang[from[i]]-ang[to[i]])-B[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[from[i]];
Jj[nnzJ]=NodetoFuncQ[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
if(type[from[i]]==3){
value=V[to[i]]*(-G[i]*cos(ang[to[i]]-ang[from[i]])-B[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[to[i]];
Jj[nnzJ]=NodetoFuncQ[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
//M
if(type[from[i]]==3){
value=V[from[i]]*(G[i]*cos(ang[from[i]]-ang[to[i]])+B[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[from[i]];
Jj[nnzJ]=NodetoFuncP[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
if(type[to[i]]==3){
value=V[to[i]]*(G[i]*cos(ang[to[i]]-ang[from[i]])+B[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[to[i]];
Jj[nnzJ]=NodetoFuncP[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
}
}
for(int i=0;i<n;i++){//H
nnzJ++;
Ji[nnzJ]=i;
Jj[nnzJ]=i;
J[nnzJ]=V[FunctoNode[i]]*V[FunctoNode[i]]*B[FunctoNode[i]+lineN]+Q[i];
//if(nnzJ==985)
// printf("//");
}
for(int i=0;i<n-r;i++){//L
nnzJ++;
Ji[nnzJ]=i+n;
Jj[nnzJ]=i+n;
J[nnzJ]=V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*B[FunctoNode[i+n]+lineN]-Q[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
}
for(int i=0;i<n-r;i++){//NM
//if(type[FunctoNode[i]]==3){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[FunctoNode[i+n]];
Jj[nnzJ]=i+n;
J[nnzJ]=-V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*G[FunctoNode[i+n]+lineN]-P[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
nnzJ++;
Ji[nnzJ]=i+n;
Jj[nnzJ]=NodetoFuncP[FunctoNode[i+n]];
J[nnzJ]=V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*G[FunctoNode[i+n]+lineN]-P[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
}
//for(int i=0;i<n+1+lineN;i++)
// printf("%d %f %f\n",i,G[i],B[i]);
//for(int i=0;i<nnzJ;i++)
// printf("%d %d %f\n",Ji[i],Jj[i],J[i]);
return nnzJ+1;
}
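// sort: simple in-place bubble sort of the (column index, value) pairs in
// [start, end), used to order each CSR row.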
void sort(int *col_idx, double *a, int start, int end)
{
int i, j, it;
double dt;
for (i=end-1; i>start; i--)
for(j=start; j<i; j++)
if (col_idx[j] > col_idx[j+1]){
if (a){
dt=a[j];
a[j]=a[j+1];
a[j+1]=dt;
}
it=col_idx[j];
col_idx[j]=col_idx[j+1];
col_idx[j+1]=it;
}
}
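// coo2csr: count row lengths, scatter the COO triplets into the CSR arrays,
// shift row_start back, then sort the column indices inside every row.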
void coo2csr(int n, int nz, double *a, int *i_idx, int *j_idx,
double *csr_a, int *col_idx, int *row_start)
{
int i, l;
for (i=0; i<=n; i++) row_start[i] = 0;
/* determine row lengths */
for (i=0; i<nz; i++) row_start[i_idx[i]+1]++;
for (i=0; i<n; i++) row_start[i+1] += row_start[i];
/* go through the structure once more. Fill in output matrix. */
for (l=0; l<nz; l++){
i = row_start[i_idx[l]];
csr_a[i] = a[l];
col_idx[i] = j_idx[l];
row_start[i_idx[l]]++;
}
/* shift back row_start */
for (i=n; i>0; i--) row_start[i] = row_start[i-1];
row_start[0] = 0;
for (i=0; i<n; i++){
sort (col_idx, csr_a, row_start[i], row_start[i+1]);
}
}
int main()
{
// hipDeviceReset();
// getchar();
const int N=13659;
int n=N-1;
double tLU=0,tanaly=0,tpf=0,tsolve=0,tformY=0,tformJ=0,tchange=0,ttotal=0;
double t;
int iteration=100;
for(int ite=0;ite<iteration;ite++){
hipDeviceReset ( );
//struct busstation
//{
// double V,ang,Pg,Qg,Pl,Ql;
// int type;
//}bus[N];
// int k;
// double R[N*N],X[N*N],C[N*N]={0},tr[N*N],shift[N*N];
double *R = (double*)malloc(5*N*sizeof(double));
double *X = (double*)malloc(5*N*sizeof(double));
double *C = (double*)malloc(5*N*sizeof(double));
double *tr = (double*)malloc(5*N*sizeof(double));
double *shift = (double*)malloc(5*N*sizeof(double));
int *from = (int*)malloc(5*N*sizeof(int));
int *to = (int*)malloc(5*N*sizeof(int));
double *V = (double*)malloc(N*sizeof(double));
double *ang = (double*)malloc(N*sizeof(double));
double *Pg = (double*)malloc(N*sizeof(double));
double *Qg = (double*)malloc(N*sizeof(double));
double *Pl = (double*)malloc(N*sizeof(double));
double *Ql = (double*)malloc(N*sizeof(double));
double *GG = (double*)malloc(N*sizeof(double));
double *BB = (double*)malloc(N*sizeof(double));
int *type = (int*)malloc(N*sizeof(int));
//double V[N],ang[N],Pg[N],Qg[N],Pl[N],Ql[N],GG[N],BB[N];
//int type[N];
long int *node = (long int*)malloc(N*sizeof(long int));
// int from[N*N],to[N*N];
//double inix[2*N];
int *FunctoNode = (int*)malloc(2*N*sizeof(int));
int *NodetoFuncP = (int*)malloc(N*sizeof(int));
int *NodetoFuncQ = (int*)malloc(N*sizeof(int));
//int FunctoNode[2*N];//nodeAng[i]angi
//int NodetoFuncP[N],NodetoFuncQ[N];
//double cstV,cstth;
// for(long int i=0;i<N*N;i++){
// R[i]=1.0e308;
// X[i]=1.0e308;
// tr[i]=1;
//}
FILE *fp;
if((fp=fopen("net_13659ill_lamda1.002.txt","rt+"))==NULL){
printf("Cannot open file strike any key exit!");
getch();
exit(1);
}
int nPV=0,nPQ=0;
for (int i=0;i<N;i++){
//fscanf(fp,"%d ",&node);
fscanf(fp,"%d %lf %lf %lf %lf %lf %lf %lf %lf %d\n",&node[i],&V[i],&ang[i],&Pg[i],&Qg[i],&Pl[i],&Ql[i],&GG[i],&BB[i],&type[i]);
ang[i]=ang[i]*M_PI/180;
if(type[i]==2){ //PV
//inix[nPQ+nPV]=ang[node-1];
ang[i]=0;
FunctoNode[nPQ+nPV]=i;
NodetoFuncP[i]=nPQ+nPV;
nPV++;
}
if(type[i]==3){ //PQ
//inix[nPQ+N-1]=V[node-1];
ang[i]=0;
V[i]=1;
FunctoNode[nPQ+N-1]=i;
//inix[nPQ+nPV]=ang[node-1];
FunctoNode[nPQ+nPV]=i;
NodetoFuncP[i]=nPQ+nPV;
NodetoFuncQ[i]=nPQ+N-1;
nPQ++;
}
//if(type[node-1]==1){ //
// cstV=V[node-1];
// cstth=ang[node-1];
//}
}
//for(int i=0;i<N;i++)
// printf("%f ",ang[i]);
int nfunc=2*(N-1)-nPV;
//printf("%d ",nPV);
int lineN=0;
long int fromNode,toNode;
while (!feof(fp)){
//fscanf(fp,"%d %d ",&from,&to);
fscanf(fp,"%d %d %lf %lf %lf %lf %lf\n",&fromNode,&toNode,&R[lineN],&X[lineN],&C[lineN],&tr[lineN],&shift[lineN]);
for(int i=0;i<N;i++){
if (node[i]==fromNode) from[lineN]=i;
if (node[i]==toNode) to[lineN]=i;
}
lineN++;
//R[(to-1)*N+from-1]=R[(from-1)*N+to-1];
//X[(to-1)*N+from-1]=X[(from-1)*N+to-1];
//C[(to-1)*N+from-1]=C[(from-1)*N+to-1];
//trsign[(from-1)*N+to-1]=1;//
//tr[(to-1)*N+from-1]=tr[(from-1)*N+to-1];
// fscanf(fp,"%d",&from);
}
fclose(fp);
double *dev_r,*dev_x,*dev_c,*dev_tr,*dev_b1,*dev_g1,*dev_G,*dev_B;
int *dev_lineN,*dev_from,*dev_to;
//double* G = (double *)malloc( N*N*sizeof(double));
//double* B = (double *)malloc( N*N*sizeof(double));
/*double G[N*N]={0},B[N*N]={0};*/
//for(int i=0;i<N*N;i++) G[i]=0;
/* clock_t start=clock()*/
//hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord(start, 0);
hipMalloc((void**)&dev_r,lineN * sizeof(double));
hipMemcpy(dev_r,R,lineN * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_x,lineN* sizeof(double));
hipMemcpy(dev_x,X,lineN* sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_c,lineN * sizeof(double));
hipMemcpy(dev_c,C,lineN* sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_tr,lineN * sizeof(double));
hipMemcpy(dev_tr,tr,lineN* sizeof(double), hipMemcpyHostToDevice);
//hipMalloc((void**)&dev_trsign,N*N * sizeof(double));
// hipMemcpy(dev_trsign,trsign,N*N * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_b1,N * sizeof(double));
hipMemcpy(dev_b1,BB,N * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_g1,N * sizeof(double));
hipMemcpy(dev_g1,GG,N * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_G,(lineN+N) * sizeof(double));
hipMalloc((void**)&dev_B,(lineN+N) * sizeof(double));
hipMalloc((void**)&dev_lineN,sizeof(int));
hipMemcpy(dev_lineN,&lineN,sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_from,lineN*sizeof(int));
hipMemcpy(dev_from,from,lineN*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_to,lineN*sizeof(int));
hipMemcpy(dev_to,to,lineN*sizeof(int), hipMemcpyHostToDevice);
//hipEventRecord(stop, 0);
// hipEventSynchronize(stop);
// double elapsedTime;
// hipEventElapsedTime(&elapsedTime, start, stop);
//printf("time = %f ",elapsedTime);
/* clock_t stop=clock()*/;
//clock_t stop=clock();
//double time=(double) (stop-start);
//printf("time = %f ",time);
//double G[N*N]={0},B[N*N]={0};
//hipMemcpy(G, dev_G,(lineN+N)*sizeof(double), hipMemcpyDeviceToHost);
//hipMemcpy(B, dev_B,(lineN+N)*sizeof(double), hipMemcpyDeviceToHost);
////
//for(int i=0;i<(lineN+N);i++){
// if(i<lineN)
// printf("%d %d %f\n",from[i],to[i],G[i]);
// else
// printf("%d %d %f\n",i-lineN,i-lineN,G[i]);
//}
//printf("%f ",G[36*N+36]);
//if((fp=fopen("csrLU13659.txt","rt+"))==NULL){
// printf("Cannot open file strike any key exit!");
// getch();
// exit(1);
// }
//int nfunc,nnz;
//fscanf(fp,"%d %d",&nfunc,&nnz);
//double* val = (double *)malloc(nnz*sizeof(double));
//int* colind = (int *)malloc(nnz*sizeof(int));
//int* rowptr = (int *)malloc((nfunc+1)*sizeof(int));
//for(int i=0;i<nnz;i++)
// fscanf(fp,"%lf",&val[i]);
//for(int i=0;i<nnz;i++)
// fscanf(fp,"%d",&colind[i]);
//for(int i=0;i<nfunc+1;i++)
// fscanf(fp,"%d",&rowptr[i]);
//fclose(fp);
////for (int i=0;i<nnzL;i++)
//// printf("%f\n",valL[i]);
//double *d_val;
//int *d_colind,*d_rowptr;
//hipMalloc((void**)&d_val, nnz*sizeof(double));
//hipMemcpy(d_val, val, nnz*sizeof(double), hipMemcpyHostToDevice);
//hipMalloc((void**)&d_colind, nnz*sizeof(int));
//hipMemcpy(d_colind, colind, nnz*sizeof(int), hipMemcpyHostToDevice);
//hipMalloc((void**)&d_rowptr, (nfunc+1)*sizeof(int));
//hipMemcpy(d_rowptr, rowptr, (nfunc+1)*sizeof(int), hipMemcpyHostToDevice);
double *dev_G2,*dev_B2,*dev_Pg,*dev_Qg,*dev_Pl,*dev_Ql,*dev_V,*dev_angle;
int *dev_FunctoNode;
double *dev_fx1,*dev_fx2;
double *dev_pf,*dev_cntI;
int *dev_NodetoFuncP,*dev_NodetoFuncQ,*dev_type;
//hipMalloc((void**)&dev_G2, N*N*sizeof(double));
//hipMemcpy(dev_G2, G,N*N*sizeof(double), hipMemcpyHostToDevice);
//hipMalloc((void**)&dev_B2, N*N*sizeof(double));
//hipMemcpy(dev_B2, B,N*N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_Pg, N*sizeof(double));
hipMemcpy(dev_Pg, Pg,N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_Qg, N*sizeof(double));
hipMemcpy(dev_Qg, Qg,N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_Pl, N*sizeof(double));
hipMemcpy(dev_Pl, Pl,N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_Ql, N*sizeof(double));
hipMemcpy(dev_Ql, Ql,N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_V, N*sizeof(double));
//hipMemcpy(dev_V, V,N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_FunctoNode, 2*N*sizeof(int));
hipMemcpy(dev_FunctoNode, FunctoNode,2*N*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_angle, N*sizeof(double));
hipMemcpy(dev_angle, ang,N*sizeof(double), hipMemcpyHostToDevice);
//hipMalloc((void**)&dev_nodeAng, N*sizeof(int));
//hipMemcpy(dev_nodeAng, nodeAng,N*sizeof(int), hipMemcpyHostToDevice);
//hipMalloc((void**)&dev_pfplus,nfunc * sizeof(double));
//hipMalloc((void**)&dev_pfminus,nfunc * sizeof(double));
hipMalloc((void**)&dev_fx1,nfunc * sizeof(double));
hipMalloc((void**)&dev_fx2,nfunc * sizeof(double));
hipMalloc((void**)&dev_pf,nfunc * sizeof(double));
hipMalloc((void**)&dev_cntI,2*n * sizeof(double));
hipMalloc((void**)&dev_NodetoFuncP,N * sizeof(int));
hipMemcpy(dev_NodetoFuncP,NodetoFuncP,N*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_NodetoFuncQ,N * sizeof(int));
hipMemcpy(dev_NodetoFuncQ,NodetoFuncQ,N*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_type,N * sizeof(int));
hipMemcpy(dev_type,type,N*sizeof(int), hipMemcpyHostToDevice);
double *dev_delt;
hipMalloc((void**)&dev_delt, sizeof(double));
double *fxzeros = (double*)malloc(2*N*sizeof(double));
for(int i=0;i<2*N;i++){
fxzeros[i]=0;
}
//dim3 threadsPerBlock(256);
//dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
int threads=256;
int blocksformY=(lineN+N)/threads+1;
//hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
//hipDeviceSynchronize();
t=0;
double tmax=50;
//double delt;
//hipEvent_t start,stop;
double *deltat = (double*)malloc(sizeof(double));
*deltat=0.01;
hipMemcpy(dev_delt, deltat,sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_V, V,N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_angle, ang,N*sizeof(double), hipMemcpyHostToDevice);
LARGE_INTEGER t1,t2,tc;
QueryPerformanceFrequency(&tc);
QueryPerformanceCounter(&t1);
LARGE_INTEGER ts,te;
hipLaunchKernelGGL(( formYKernel), dim3(blocksformY),dim3(threads), 0, 0, dev_G,dev_B,dev_lineN,dev_from,dev_to,dev_r,dev_x,dev_c,dev_tr,dev_g1,dev_b1);
hipDeviceSynchronize();
QueryPerformanceCounter(&t2);
tformY+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
double *G=(double*)malloc((lineN+N)*sizeof(double));
hipMemcpy(G, dev_G,(lineN+N)*sizeof(double), hipMemcpyDeviceToHost);
double *B=(double*)malloc((lineN+N)*sizeof(double));
hipMemcpy(B, dev_B,(lineN+N)*sizeof(double), hipMemcpyDeviceToHost);
lineN=mergeY(from,to,G,B,lineN,N);
hipMemcpy(dev_G, G,(lineN+N)*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_B, B,(lineN+N)*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_lineN,&lineN,sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_from,from,lineN*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_to,to,lineN*sizeof(int), hipMemcpyHostToDevice);
//for(int i=0;i<lineN+N;i++)
// if(i<lineN)
// printf("%d %d %f %f\n",from[i],to[i],G[i],B[i]);
// else
// printf("%d %d %f %f\n",i-lineN,i-lineN,G[i],B[i]);
double *J=(double*)malloc((N*N)*sizeof(double));
int *Ji=(int*)malloc((N*N)*sizeof(int));
int *Jj=(int*)malloc((N*N)*sizeof(int));
//for(int i=0;i<2*(N-1);i++)
// printf("%f ",cntI_all[i]);
// printf("formY Time:%f ms\n",(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart);
free(R);
free(X);
free(C);
free(tr);
free(shift);
//// part X://
//if((fp=fopen("csrJ13659.txt","rt+"))==NULL){
// printf("Cannot open file strike any key exit!");
// getch();
// exit(1);
// }
//int nnz;
//fscanf(fp,"%d %d",&nfunc,&nnz);
//for(int i=0;i<nnzJ;i++)
// printf("%f ",val[i]);
//printf("\n");
//for(int i=0;i<nnzJ;i++)
// printf("%d ",colind[i]);
//printf("\n");
//for(int i=0;i<nfunc+1;i++)
// printf("%d ",rowptr[i]);
//
	double *angle0 = (double*)malloc(N*sizeof(double)); // hold the full angle vector: angle0[FunctoNode[0]] is read below
double *anglelast = (double*)malloc(sizeof(double));
*anglelast=0;
//hipblasHandle_t cublasHandle = NULL;
//hipblasCreate(&cublasHandle);
const double alpha=1.0,beta=0.0;
int blocksode=nfunc/threads+1;
//QueryPerformanceCounter(&t22);
//tanaly+=(t22.QuadPart - t11.QuadPart)*1000.0/tc.QuadPart;
//
double *d_z;
hipMalloc((void**)&d_z, nfunc * sizeof(double));
double *test=(double*)malloc(nfunc*sizeof(double));
double *pf=(double*)malloc(nfunc*sizeof(double));
double *permuPf=(double*)malloc(nfunc*sizeof(double));
//LARGE_INTEGER t3,t4,tstart,tend;
//QueryPerformanceFrequency(&tc);
// QueryPerformanceCounter(&t3);
int blocksPQ=(2*n)/threads+1;
int blockscntI=(N+lineN)/threads+1;
double *d_Ptot,*d_Qtot;
hipMalloc((void**)&d_Ptot, n * sizeof(double));
hipMalloc((void**)&d_Qtot, n * sizeof(double));
double *Ptot=(double*)malloc(N*sizeof(double));
double *Qtot=(double*)malloc(N*sizeof(double));
int nnzJ;
_handle_t solver = NULL;
_uint_t i;
_double_t *cfg;
const _double_t *stat;
if (__FAIL(NicsLU_Initialize(&solver, &cfg, &stat)))
{
printf("Failed to initialize\n");
system("pause");
return -1;
}
int* rowptrJ = (int *)malloc((nfunc+1)*sizeof(int));
unsigned int* rowptr = (unsigned int *)malloc((nfunc+1)*sizeof(unsigned int));
double *d_valL;
int *d_colindL,*d_rowptrL;
hipError_t err;
err=hipMalloc((void**)&d_rowptrL, (nfunc+1)*sizeof(int));
double *d_valU;
int *d_colindU,*d_rowptrU;
err=hipMalloc((void**)&d_rowptrU, (nfunc+1)*sizeof(int));
//float time_elapsed=0;
//hipEvent_t start,stop;
//hipEventCreate(&start); //Event
// hipEventCreate(&stop);
double *fx1=(double *)malloc(nfunc*sizeof(double));
double *fx2=(double *)malloc(nfunc*sizeof(double));
QueryPerformanceCounter(&ts);
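	// Main loop: evaluate the mismatches on the GPU, assemble the Jacobian and
	// convert it to CSR on the host, factorize/solve with NICSLU, apply the
	// predictor/corrector voltage-angle update, and stop once the monitored
	// angle changes by less than 1e-4 between steps (or t reaches tmax).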
while (t<tmax){
hipMemcpy(dev_cntI, fxzeros,2*n*sizeof(double), hipMemcpyHostToDevice);
/*QueryPerformanceCounter(&tstart);*/
//time_elapsed=0;
//hipEventRecord( start,0);
hipLaunchKernelGGL(( calc_cntI), dim3(blockscntI),dim3(threads), 0, 0, dev_cntI,dev_lineN,dev_from,dev_to,dev_G,dev_B,dev_V,dev_angle,dev_NodetoFuncP,dev_type);
hipDeviceSynchronize();
//hipMemcpy(test, dev_cntI,nfunc*sizeof(double), hipMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f ",test[i]);
hipLaunchKernelGGL(( calc_PQ), dim3(blocksPQ),dim3(threads), 0, 0, d_Ptot,d_Qtot,dev_V,dev_cntI,dev_FunctoNode);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calc_pf), dim3(blocksode),dim3(threads), 0, 0, dev_pf,d_Ptot,d_Qtot,dev_FunctoNode,dev_NodetoFuncP,dev_Pg,dev_Qg,dev_Pl,dev_Ql);
// hipDeviceSynchronize();
hipMemcpy(pf, dev_pf,nfunc*sizeof(double), hipMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f\n",pf[i]);
//hipEventRecord( stop,0); //
//hipEventSynchronize(start); //Waits for an event to complete.
//hipEventSynchronize(stop); //Waits for an event to complete.Record
//hipEventElapsedTime(&time_elapsed,start,stop); //
//tpf+=time_elapsed;
hipMemcpy(Ptot, d_Ptot,n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(Qtot, d_Qtot,n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(V, dev_V,N*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(ang, dev_angle,N*sizeof(double), hipMemcpyDeviceToHost);
QueryPerformanceCounter(&t1);
nnzJ=formJ(Ji,Jj,J,from,to,G,B,V,ang,Ptot,Qtot,N-1,nPV,lineN,NodetoFuncP,NodetoFuncQ,FunctoNode,type);
double* val = (double *)malloc(nnzJ*sizeof(double));
int* colindJ = (int *)malloc(nnzJ*sizeof(int));
coo2csr(nfunc,nnzJ,J,Ji,Jj,val,colindJ,rowptrJ);
QueryPerformanceCounter(&t2);
tformJ+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
unsigned int* colind = (unsigned int *)malloc(nnzJ*sizeof(unsigned int));
for(int i=0;i<nnzJ;i++)
colind[i]=(unsigned int)colindJ[i];
for(int i=0;i<nfunc+1;i++)
rowptr[i]=(unsigned int)rowptrJ[i];
//for(int i=0;i<nnzJ;i++)
// printf("%f ",val[i]);
//printf("\n");
//for(int i=0;i<nnzJ;i++)
// printf("%d ",colind[i]);
//printf("\n");
//for(int i=0;i<nfunc+1;i++)
// printf("%d ",rowptr[i]);
QueryPerformanceFrequency(&tc);
QueryPerformanceCounter(&t1);
// printf("Version %.0lf\nLicense to %.0lf\n", stat[31], stat[29]);
int cao=NicsLU_Analyze(solver, nfunc, val, colind, rowptr, MATRIX_ROW_REAL, NULL, NULL);
int cao2=NicsLU_Factorize(solver, val, 1);
//for(int i=0;i<20;i++)
// printf("%f ",stat[i]);
QueryPerformanceCounter(&t2);
tLU+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
QueryPerformanceCounter(&t1);
NicsLU_Solve(solver, pf, fx1);
QueryPerformanceCounter(&t2);
tsolve+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
hipMemcpy(dev_fx1, fx1,nfunc*sizeof(double), hipMemcpyHostToDevice);
//hipMemcpy(test, dev_fx1,nfunc*sizeof(double), hipMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f\n",test[i]);
//hipEventRecord( start,0);
hipLaunchKernelGGL(( changeVAng1), dim3(blocksode),dim3(threads), 0, 0, dev_V,dev_angle,dev_FunctoNode,dev_delt,dev_fx1);
//hipEventRecord( stop,0); //
//hipEventSynchronize(start); //Waits for an event to complete.
//hipEventSynchronize(stop); //Waits for an event to complete.Record
//hipEventElapsedTime(&time_elapsed,start,stop); //
//tchange+=time_elapsed;
hipMemcpy(dev_cntI, fxzeros,nfunc*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calc_cntI), dim3(blockscntI),dim3(threads), 0, 0, dev_cntI,dev_lineN,dev_from,dev_to,dev_G,dev_B,dev_V,dev_angle,dev_NodetoFuncP,dev_type);
hipDeviceSynchronize();
//hipMemcpy(test, dev_cntI,nfunc*sizeof(double), hipMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("cntI[%d]=%f\n",i,test[i]);
hipLaunchKernelGGL(( calc_PQ), dim3(blocksPQ),dim3(threads), 0, 0, d_Ptot,d_Qtot,dev_V,dev_cntI,dev_FunctoNode);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calc_pf), dim3(blocksode),dim3(threads), 0, 0, dev_pf,d_Ptot,d_Qtot,dev_FunctoNode,dev_NodetoFuncP,dev_Pg,dev_Qg,dev_Pl,dev_Ql);
// hipDeviceSynchronize();
hipMemcpy(pf, dev_pf,nfunc*sizeof(double), hipMemcpyDeviceToHost);
QueryPerformanceCounter(&t1);
NicsLU_Solve(solver, pf, fx2);
QueryPerformanceCounter(&t2);
tsolve+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
hipMemcpy(dev_fx2, fx2,nfunc*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( changeVAng2), dim3(blocksode),dim3(threads), 0, 0, dev_V,dev_angle,dev_FunctoNode,dev_delt,dev_fx1,dev_fx2);
	hipMemcpy(angle0, dev_angle,N*sizeof(double), hipMemcpyDeviceToHost);
t=t+*deltat;
printf("t= %f angle[1]=%f\n",t,angle0[FunctoNode[0]]);
if(abs(angle0[FunctoNode[0]]-*anglelast)<0.0001) break;
*anglelast=angle0[FunctoNode[0]];
}
QueryPerformanceCounter(&te);
ttotal+=(te.QuadPart - ts.QuadPart)*1000.0/tc.QuadPart;
//QueryPerformanceCounter(&t4);
// printf("solve total Time:%f ms\n",(t4.QuadPart - t3.QuadPart)*1000.0/tc.QuadPart);
//double time=(double) (stop-start);
//printf("time = %f ",time);
free(fx1);
free(fx2);
free(from);
free(to);
free(V);
free(ang);
free(Pg);
free(Qg);
free(Pl);
free(Ql);
free(GG);
free(BB);
free(type);
free(node);
free(NodetoFuncP);
free(NodetoFuncQ);
free(FunctoNode);
free(J);
free(Ji);
free(Jj);
NicsLU_Free(solver);
hipFree(dev_fx1);
hipFree(dev_fx2);
hipFree(dev_r);
hipFree(dev_x);
hipFree(dev_c);
hipFree(dev_tr);
hipFree(dev_b1);
hipFree(dev_g1);
//hipFree(dev_k);
//hipFree(dev_delt);
//hipFree(fxplus);
//hipFree(fxminus);
hipFree(d_valL);
hipFree(d_rowptrL);
hipFree(d_colindL);
hipFree(d_valU);
hipFree(d_rowptrU);
hipFree(d_colindU);
//hipFree(dev_G2);
//hipFree(dev_B2);
hipFree(dev_Pg);
hipFree(dev_Qg);
hipFree(dev_Pl);
hipFree(dev_Ql);
//hipFree(dev_pfplus);
//hipFree(dev_pfminus);
hipFree(dev_V);
hipFree(dev_angle);
hipFree(dev_FunctoNode);
hipFree(dev_type);
hipFree(dev_NodetoFuncP);
hipFree(dev_NodetoFuncQ);
	//hipFree(dev_type); // duplicate free: dev_type is already freed above
hipFree(dev_cntI);
hipFree(dev_lineN);
hipFree(dev_from);
hipFree(dev_to);
hipFree(dev_G);
hipFree(dev_B);
//hipEventDestroy(start); //destory the event
//hipEventDestroy(stop);
}
printf("iteration times: %f\n",t);
printf("formY Time:%f ms\n",tformY/iteration);
printf("formJ Time:%f ms\n",tformJ/iteration);
printf("JLU Time:%f ms\n",tLU/iteration);
printf("solve Time:%f ms\n",tsolve/iteration);
printf("total time:%f ms\n",ttotal/iteration);
//free(val);
//free(rowptr);
//free(colind);
//free(valU);
//free(rowptrU);
//free(colindU);
//free(inimax);
//free(delt);
//getchar();
//free(G);
//free(B);
return 0;
} | 5cb27d126ed39a90f04f8b55a5bedfcaafcd8f05.cu | // Finally completed the 13659-node and 13659-node computations; the results are fairly accurate
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include "device_functions.h"
//#include "sm_12_atomic_functions.h"
//#include "sm_13_double_functions.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <conio.h>
#include <time.h>
#include <windows.h>
#include <iostream>
#include <assert.h>
#include <string.h>
#include <cuda_runtime.h>
#include "nicslu.h"
//#include "Utilities.cuh"
#define M_PI 3.14159265358979
//__shared__ double G[13659*13659],B[13659*13659];
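// formYKernel: builds the nodal admittance matrix Y = G + jB in branch/bus form.
// Threads 0..lineN-1 compute one branch (off-diagonal) entry each from the
// pi-model with off-nominal tap tr; threads lineN..lineN+N-1 accumulate the
// self-admittance of one bus, including line charging c and the shunt g1 + j*b1.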
__global__ void formYKernel(double *G,double *B,int *lineN,int *from,int *to,double *r,double *x,double *c,double *tr,double *g1,double *b1){
int i = blockIdx.x * blockDim.x + threadIdx.x;
//// printf("%d %d ",i,j);
//// int N=13659;
//// double trmax;
//// double gij=r[N*i+j]/(r[N*i+j]*r[N*i+j]+x[N*i+j]*x[N*i+j]);
//// double bij=-x[N*i+j]/(r[N*i+j]*r[N*i+j]+x[N*i+j]*x[N*i+j]);
//// if(i!=j){
//// if (tr[N*i+j]>tr[N*i+j]) trmax=tr[N*i+j];
//// else trmax=tr[N*i+j];
////// printf("%f ",trmax);
//// G[N*i+j]=-trmax*gij;
//// B[N*i+j]=-trmax*bij;
//// }
// int N=13659;
// if(i<N*N){
//// if(i==2) printf("%f ",gi);
// if((i-i/N)/N==i/N){// only compute the upper triangle
// double gi=r[i]/(r[i]*r[i]+x[i]*x[i]);
// double bi=-x[i]/(r[i]*r[i]+x[i]*x[i]);
// if((i-i/N) % N !=0){// not on the diagonal
// G[i]=-tr[i]*gi;
//B[i]=-tr[i]*bi;
// }
// else{// compute the diagonal element
// double cntg=0,cntb=0;
// int j=i/N;// the j-th diagonal element
// for(int k=0;k<N;k++){
// double trdirec;
// if (trsign[N*j+k]) trdirec=tr[N*j+k];
// else trdirec=1.0;
// cntg=cntg+trdirec*trdirec*(r[N*j+k]/(r[N*j+k]*r[N*j+k]+x[N*j+k]*x[N*j+k]));
// cntb=cntb+trdirec*trdirec*(-x[N*j+k]/(r[N*j+k]*r[N*j+k]+x[N*j+k]*x[N*j+k])+0.5*c[N*j+k]);
// }
// G[i]=cntg+g1[j];
// B[i]=cntb+b1[j];
// }
// }
// else {
// G[i]=0;B[i]=0;
// }
// }
int N=13659,j;
double cntg,cntb;
	if(i<*lineN){// the first lineN threads compute the off-diagonal (branch) entries
	//if(from[i]<to[i]){// only compute the upper triangle
// G[from[i]*N+to[i]]=-tr[i]*(r[i]/(r[i]*r[i]+x[i]*x[i]));
// B[from[i]*N+to[i]]=-tr[i]*(-x[i]/(r[i]*r[i]+x[i]*x[i]));
//}else{
// G[to[i]*N+from[i]]=-tr[i]*(r[i]/(r[i]*r[i]+x[i]*x[i]));
// B[to[i]*N+from[i]]=-tr[i]*(-x[i]/(r[i]*r[i]+x[i]*x[i]));
//}
G[i]=-(r[i]/(r[i]*r[i]+x[i]*x[i]))/tr[i];
B[i]=-(-x[i]/(r[i]*r[i]+x[i]*x[i]))/tr[i];
}
else
		if(i<*lineN+N){// the last N threads compute the diagonal (bus) entries
j=i-*lineN;
cntg=0;cntb=0;
for(int k=0;k<*lineN;k++){
if(from[k]==j){
cntg=cntg+(r[k]/(r[k]*r[k]+x[k]*x[k]))/(tr[k]*tr[k]);
cntb=cntb+(-x[k]/(r[k]*r[k]+x[k]*x[k])+0.5*c[k])/(tr[k]*tr[k]);
}
if(to[k]==j){
cntg=cntg+r[k]/(r[k]*r[k]+x[k]*x[k]);
cntb=cntb-x[k]/(r[k]*r[k]+x[k]*x[k])+0.5*c[k];
}
}
G[i]=cntg+g1[j];
B[i]=cntb+b1[j];
}
}
//double findmax(double *a){
// double maxa=0.0;
// for(int i=0;i<n;i++){
// if (a[i]>maxa)
// maxa=a[i];
// }
// return(maxa);
//}
//#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
//#else
__device__ double MyatomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
//#endif
__global__ void calc_cntI(double *cntI,int *lineN,int *from,int*to,double *G,double *B,double *V,double *angle,int *NodetoFuncP,int *type)
{
//double deltat=0.5;x
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
//double Vj,Vi;
// double *fxplus,*fxminus;
//x[*k]=x[*k]+deltaq;
long int i = blockIdx.x * blockDim.x + threadIdx.x;
double deltaP,deltaQ;
if(i<*lineN){
if(type[from[i]]!=1){
deltaP=V[to[i]]*(G[i]*cos(angle[from[i]]-angle[to[i]])+B[i]*sin(angle[from[i]]-angle[to[i]]));
MyatomicAdd(&cntI[NodetoFuncP[from[i]]],deltaP);
deltaQ=V[to[i]]*(G[i]*sin(angle[from[i]]-angle[to[i]])-B[i]*cos(angle[from[i]]-angle[to[i]]));
MyatomicAdd(&cntI[NodetoFuncP[from[i]]+n],deltaQ);
}
if(type[to[i]]!=1){
deltaP=V[from[i]]*(G[i]*cos(angle[to[i]]-angle[from[i]])+B[i]*sin(angle[to[i]]-angle[from[i]]));
MyatomicAdd(&cntI[NodetoFuncP[to[i]]],deltaP);
deltaQ=V[from[i]]*(G[i]*sin(angle[to[i]]-angle[from[i]])-B[i]*cos(angle[to[i]]-angle[from[i]]));
MyatomicAdd(&cntI[NodetoFuncP[to[i]]+n],deltaQ);
}
}
else if(i<*lineN+N){
int j=i-(*lineN);
if(type[j]!=1){
MyatomicAdd(&cntI[NodetoFuncP[j]],V[j]*G[i]);
MyatomicAdd(&cntI[NodetoFuncP[j]+n],-V[j]*B[i]);
}
//if(NodetoFuncP[j]==1) printf("%f %f",V[j]*G[i],cntI[NodetoFuncP[j]]);
}
}
__global__ void calc_PQ(double *Ptot,double *Qtot,double *V,double *cntI,int *FunctoNode){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
if(i<N-1)
Ptot[i]=V[FunctoNode[i]]*cntI[i];
else if(i<2*(N-1))
Qtot[i-N+1]=V[FunctoNode[i-N+1]]*cntI[i];
}
__global__ void calc_pf(double *pf,double *Ptot,double *Qtot,int *FunctoNode,int *NodetoFuncP,double *Pg,double *Qg,double *Pl,double *Ql){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
if(i<n){
int node=FunctoNode[i];
pf[i]=-(Pg[node]-Pl[node]-Ptot[i]);
}
else if(i<nfunc){
int node=FunctoNode[i];
pf[i]=-(Qg[node]-Ql[node]-Qtot[NodetoFuncP[node]]);
}
}
__global__ void changeVAng1(double *V,double *Ang,int *FunctoNode,double *deltat,double *fx1){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
//printf(" %f %f\n",*deltat,fx1[0]);
if(i<N-1)
Ang[FunctoNode[i]]+=(*deltat)*fx1[i];
else if(i<nfunc)
V[FunctoNode[i]]+=(*deltat)*fx1[i]*V[FunctoNode[i]];
//if(i==38)
// printf("angle[%d]=%f\n",i,V[nodeV[i]]);
}
__global__ void changeVAng2(double *V,double *Ang,int *FunctoNode,double *deltat,double *fx1,double *fx2){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int N=13659;
int n=N-1;
int nPV=4091;
int nfunc=2*n-nPV;
if(i<N-1)
Ang[FunctoNode[i]]=Ang[FunctoNode[i]]-(*deltat)*fx1[i]+0.5*(*deltat)*(fx2[i]+fx1[i]);
// Ang[FunctoNode[i]]=Ang[FunctoNode[i]]+0.5*(*deltat)*(fx2[i]+fx1[i]);
else if(i<nfunc)
V[FunctoNode[i]]=V[FunctoNode[i]]-(*deltat)*fx1[i]+(V[FunctoNode[i]]-(*deltat)*fx1[i])*0.5*(*deltat)*(fx2[i]+fx1[i]);
// V[FunctoNode[i]]=V[FunctoNode[i]]+(V[FunctoNode[i]]-(*deltat)*fx1[i])*0.5*(*deltat)*(fx2[i]+fx1[i]);
//-(*deltat)*fx1[i]
}
int mergeY(int *from,int *to,double *G,double *B,int lineN,int N){
int i=0;
while (i<lineN){
for(int j=0;j<i;j++){
if(((from[i]==from[j])&&(to[i]==to[j]))||((from[i]==to[j])&&(to[i]==from[j]))){
G[j]+=G[i];
B[j]+=B[i];
for(int k=i;k<lineN-1;k++){
from[k]=from[k+1];
to[k]=to[k+1];
G[k]=G[k+1];
B[k]=B[k+1];
}
for(int k=lineN-1;k<lineN+N-1;k++){
G[k]=G[k+1];
B[k]=B[k+1];
}
lineN--;
i--;
}
}
i++;
}
return lineN;
}
int formJ(int *Ji,int *Jj,double *J,int *from,int *to,double *G,double *B,double *V,double *ang,double *P,double *Q,int n,int r,int lineN,int *NodetoFuncP,int *NodetoFuncQ,int *FunctoNode,int *type){
int nnzJ=-1;
double value;
for(int i=0;i<lineN;i++){
if((type[from[i]]!=1)&&(type[to[i]]!=1)){
	//H: the two non-zero off-diagonal entries
value=V[from[i]]*(B[i]*cos(ang[from[i]]-ang[to[i]])-G[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[from[i]];
Jj[nnzJ]=NodetoFuncP[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
// printf("//");
}
value=V[to[i]]*(B[i]*cos(ang[to[i]]-ang[from[i]])-G[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[to[i]];
Jj[nnzJ]=NodetoFuncP[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
// printf("//");
}
	//L: the two non-zero off-diagonal entries
if((type[from[i]]==3)&&(type[to[i]]==3)){
value=V[from[i]]*(B[i]*cos(ang[from[i]]-ang[to[i]])-G[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[from[i]];
Jj[nnzJ]=NodetoFuncQ[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
value=V[to[i]]*(B[i]*cos(ang[to[i]]-ang[from[i]])-G[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[to[i]];
Jj[nnzJ]=NodetoFuncQ[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
	//N: the two non-zero off-diagonal entries
if(type[to[i]]==3){
value=V[from[i]]*(-G[i]*cos(ang[from[i]]-ang[to[i]])-B[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[from[i]];
Jj[nnzJ]=NodetoFuncQ[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
if(type[from[i]]==3){
value=V[to[i]]*(-G[i]*cos(ang[to[i]]-ang[from[i]])-B[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[to[i]];
Jj[nnzJ]=NodetoFuncQ[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
	//M: the two non-zero off-diagonal entries
if(type[from[i]]==3){
value=V[from[i]]*(G[i]*cos(ang[from[i]]-ang[to[i]])+B[i]*sin(ang[from[i]]-ang[to[i]]))*V[to[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[from[i]];
Jj[nnzJ]=NodetoFuncP[to[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
if(type[to[i]]==3){
value=V[to[i]]*(G[i]*cos(ang[to[i]]-ang[from[i]])+B[i]*sin(ang[to[i]]-ang[from[i]]))*V[from[i]];
if(abs(value)>0.000000001){
nnzJ++;
Ji[nnzJ]=NodetoFuncQ[to[i]];
Jj[nnzJ]=NodetoFuncP[from[i]];
J[nnzJ]=value;
//if(nnzJ==985)
//printf("//");
}
}
}
}
	for(int i=0;i<n;i++){//H diagonal entries
nnzJ++;
Ji[nnzJ]=i;
Jj[nnzJ]=i;
J[nnzJ]=V[FunctoNode[i]]*V[FunctoNode[i]]*B[FunctoNode[i]+lineN]+Q[i];
//if(nnzJ==985)
// printf("//");
}
	for(int i=0;i<n-r;i++){//L diagonal entries
nnzJ++;
Ji[nnzJ]=i+n;
Jj[nnzJ]=i+n;
J[nnzJ]=V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*B[FunctoNode[i+n]+lineN]-Q[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
}
	for(int i=0;i<n-r;i++){//N and M diagonal entries
//if(type[FunctoNode[i]]==3){
nnzJ++;
Ji[nnzJ]=NodetoFuncP[FunctoNode[i+n]];
Jj[nnzJ]=i+n;
J[nnzJ]=-V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*G[FunctoNode[i+n]+lineN]-P[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
nnzJ++;
Ji[nnzJ]=i+n;
Jj[nnzJ]=NodetoFuncP[FunctoNode[i+n]];
J[nnzJ]=V[FunctoNode[i+n]]*V[FunctoNode[i+n]]*G[FunctoNode[i+n]+lineN]-P[NodetoFuncP[FunctoNode[i+n]]];
//if(nnzJ==985)
// printf("//");
}
//for(int i=0;i<n+1+lineN;i++)
// printf("%d %f %f\n",i,G[i],B[i]);
//for(int i=0;i<nnzJ;i++)
// printf("%d %d %f\n",Ji[i],Jj[i],J[i]);
return nnzJ+1;
}
void sort(int *col_idx, double *a, int start, int end)
{
int i, j, it;
double dt;
for (i=end-1; i>start; i--)
for(j=start; j<i; j++)
if (col_idx[j] > col_idx[j+1]){
if (a){
dt=a[j];
a[j]=a[j+1];
a[j+1]=dt;
}
it=col_idx[j];
col_idx[j]=col_idx[j+1];
col_idx[j+1]=it;
}
}
void coo2csr(int n, int nz, double *a, int *i_idx, int *j_idx,
double *csr_a, int *col_idx, int *row_start)
{
int i, l;
for (i=0; i<=n; i++) row_start[i] = 0;
/* determine row lengths */
for (i=0; i<nz; i++) row_start[i_idx[i]+1]++;
for (i=0; i<n; i++) row_start[i+1] += row_start[i];
/* go through the structure once more. Fill in output matrix. */
for (l=0; l<nz; l++){
i = row_start[i_idx[l]];
csr_a[i] = a[l];
col_idx[i] = j_idx[l];
row_start[i_idx[l]]++;
}
/* shift back row_start */
for (i=n; i>0; i--) row_start[i] = row_start[i-1];
row_start[0] = 0;
for (i=0; i<n; i++){
sort (col_idx, csr_a, row_start[i], row_start[i+1]);
}
}
int main()
{
// cudaDeviceReset();
// getchar();
const int N=13659;
int n=N-1;
double tLU=0,tanaly=0,tpf=0,tsolve=0,tformY=0,tformJ=0,tchange=0,ttotal=0;
double t;
int iteration=100;
for(int ite=0;ite<iteration;ite++){
cudaDeviceReset ( );
//struct busstation
//{
// double V,ang,Pg,Qg,Pl,Ql;
// int type;
//}bus[N];
// int k;
// double R[N*N],X[N*N],C[N*N]={0},tr[N*N],shift[N*N];
double *R = (double*)malloc(5*N*sizeof(double));
double *X = (double*)malloc(5*N*sizeof(double));
double *C = (double*)malloc(5*N*sizeof(double));
double *tr = (double*)malloc(5*N*sizeof(double));
double *shift = (double*)malloc(5*N*sizeof(double));
int *from = (int*)malloc(5*N*sizeof(int));
int *to = (int*)malloc(5*N*sizeof(int));
double *V = (double*)malloc(N*sizeof(double));
double *ang = (double*)malloc(N*sizeof(double));
double *Pg = (double*)malloc(N*sizeof(double));
double *Qg = (double*)malloc(N*sizeof(double));
double *Pl = (double*)malloc(N*sizeof(double));
double *Ql = (double*)malloc(N*sizeof(double));
double *GG = (double*)malloc(N*sizeof(double));
double *BB = (double*)malloc(N*sizeof(double));
int *type = (int*)malloc(N*sizeof(int));
//double V[N],ang[N],Pg[N],Qg[N],Pl[N],Ql[N],GG[N],BB[N];
//int type[N];
long int *node = (long int*)malloc(N*sizeof(long int));
// int from[N*N],to[N*N];
//double inix[2*N];
int *FunctoNode = (int*)malloc(2*N*sizeof(int));
int *NodetoFuncP = (int*)malloc(N*sizeof(int));
int *NodetoFuncQ = (int*)malloc(N*sizeof(int));
	//int FunctoNode[2*N];// nodeAng[i] gives the actual bus number of the i-th angle unknown
//int NodetoFuncP[N],NodetoFuncQ[N];
//double cstV,cstth;
// for(long int i=0;i<N*N;i++){
// R[i]=1.0e308;
// X[i]=1.0e308;
// tr[i]=1;
//}
FILE *fp;
if((fp=fopen("net_13659ill_lamda1.002.txt","rt+"))==NULL){
printf("Cannot open file strike any key exit!");
getch();
exit(1);
}
int nPV=0,nPQ=0;
for (int i=0;i<N;i++){
//fscanf(fp,"%d ",&node);
fscanf(fp,"%d %lf %lf %lf %lf %lf %lf %lf %lf %d\n",&node[i],&V[i],&ang[i],&Pg[i],&Qg[i],&Pl[i],&Ql[i],&GG[i],&BB[i],&type[i]);
ang[i]=ang[i]*M_PI/180;
		if(type[i]==2){ //PV bus
//inix[nPQ+nPV]=ang[node-1];
ang[i]=0;
FunctoNode[nPQ+nPV]=i;
NodetoFuncP[i]=nPQ+nPV;
nPV++;
}
		if(type[i]==3){ //PQ bus
//inix[nPQ+N-1]=V[node-1];
ang[i]=0;
V[i]=1;
FunctoNode[nPQ+N-1]=i;
//inix[nPQ+nPV]=ang[node-1];
FunctoNode[nPQ+nPV]=i;
NodetoFuncP[i]=nPQ+nPV;
NodetoFuncQ[i]=nPQ+N-1;
nPQ++;
}
		//if(type[node-1]==1){ // slack (reference) bus
// cstV=V[node-1];
// cstth=ang[node-1];
//}
}
//for(int i=0;i<N;i++)
// printf("%f ",ang[i]);
int nfunc=2*(N-1)-nPV;
//printf("%d ",nPV);
int lineN=0;
long int fromNode,toNode;
while (!feof(fp)){
//fscanf(fp,"%d %d ",&from,&to);
fscanf(fp,"%d %d %lf %lf %lf %lf %lf\n",&fromNode,&toNode,&R[lineN],&X[lineN],&C[lineN],&tr[lineN],&shift[lineN]);
for(int i=0;i<N;i++){
if (node[i]==fromNode) from[lineN]=i;
if (node[i]==toNode) to[lineN]=i;
}
lineN++;
//R[(to-1)*N+from-1]=R[(from-1)*N+to-1];
//X[(to-1)*N+from-1]=X[(from-1)*N+to-1];
//C[(to-1)*N+from-1]=C[(from-1)*N+to-1];
	//trsign[(from-1)*N+to-1]=1;// marks the transformer winding direction
//tr[(to-1)*N+from-1]=tr[(from-1)*N+to-1];
// fscanf(fp,"%d",&from);
}
fclose(fp);
double *dev_r,*dev_x,*dev_c,*dev_tr,*dev_b1,*dev_g1,*dev_G,*dev_B;
int *dev_lineN,*dev_from,*dev_to;
//double* G = (double *)malloc( N*N*sizeof(double));
//double* B = (double *)malloc( N*N*sizeof(double));
/*double G[N*N]={0},B[N*N]={0};*/
//for(int i=0;i<N*N;i++) G[i]=0;
/* clock_t start=clock()*/
//cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start, 0);
cudaMalloc((void**)&dev_r,lineN * sizeof(double));
cudaMemcpy(dev_r,R,lineN * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_x,lineN* sizeof(double));
cudaMemcpy(dev_x,X,lineN* sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_c,lineN * sizeof(double));
cudaMemcpy(dev_c,C,lineN* sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_tr,lineN * sizeof(double));
cudaMemcpy(dev_tr,tr,lineN* sizeof(double), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&dev_trsign,N*N * sizeof(double));
// cudaMemcpy(dev_trsign,trsign,N*N * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_b1,N * sizeof(double));
cudaMemcpy(dev_b1,BB,N * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_g1,N * sizeof(double));
cudaMemcpy(dev_g1,GG,N * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_G,(lineN+N) * sizeof(double));
cudaMalloc((void**)&dev_B,(lineN+N) * sizeof(double));
cudaMalloc((void**)&dev_lineN,sizeof(int));
cudaMemcpy(dev_lineN,&lineN,sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_from,lineN*sizeof(int));
cudaMemcpy(dev_from,from,lineN*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_to,lineN*sizeof(int));
cudaMemcpy(dev_to,to,lineN*sizeof(int), cudaMemcpyHostToDevice);
//cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// double elapsedTime;
// cudaEventElapsedTime(&elapsedTime, start, stop);
//printf("time = %f ",elapsedTime);
/* clock_t stop=clock()*/;
//clock_t stop=clock();
//double time=(double) (stop-start);
//printf("time = %f ",time);
//double G[N*N]={0},B[N*N]={0};
//cudaMemcpy(G, dev_G,(lineN+N)*sizeof(double), cudaMemcpyDeviceToHost);
//cudaMemcpy(B, dev_B,(lineN+N)*sizeof(double), cudaMemcpyDeviceToHost);
	//// form the admittance matrix
//for(int i=0;i<(lineN+N);i++){
// if(i<lineN)
// printf("%d %d %f\n",from[i],to[i],G[i]);
// else
// printf("%d %d %f\n",i-lineN,i-lineN,G[i]);
//}
//printf("%f ",G[36*N+36]);
//if((fp=fopen("csrLU13659.txt","rt+"))==NULL){
// printf("Cannot open file strike any key exit!");
// getch();
// exit(1);
// }
//int nfunc,nnz;
//fscanf(fp,"%d %d",&nfunc,&nnz);
//double* val = (double *)malloc(nnz*sizeof(double));
//int* colind = (int *)malloc(nnz*sizeof(int));
//int* rowptr = (int *)malloc((nfunc+1)*sizeof(int));
//for(int i=0;i<nnz;i++)
// fscanf(fp,"%lf",&val[i]);
//for(int i=0;i<nnz;i++)
// fscanf(fp,"%d",&colind[i]);
//for(int i=0;i<nfunc+1;i++)
// fscanf(fp,"%d",&rowptr[i]);
//fclose(fp);
////for (int i=0;i<nnzL;i++)
//// printf("%f\n",valL[i]);
//double *d_val;
//int *d_colind,*d_rowptr;
//cudaMalloc((void**)&d_val, nnz*sizeof(double));
//cudaMemcpy(d_val, val, nnz*sizeof(double), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&d_colind, nnz*sizeof(int));
//cudaMemcpy(d_colind, colind, nnz*sizeof(int), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&d_rowptr, (nfunc+1)*sizeof(int));
//cudaMemcpy(d_rowptr, rowptr, (nfunc+1)*sizeof(int), cudaMemcpyHostToDevice);
double *dev_G2,*dev_B2,*dev_Pg,*dev_Qg,*dev_Pl,*dev_Ql,*dev_V,*dev_angle;
int *dev_FunctoNode;
double *dev_fx1,*dev_fx2;
double *dev_pf,*dev_cntI;
int *dev_NodetoFuncP,*dev_NodetoFuncQ,*dev_type;
//cudaMalloc((void**)&dev_G2, N*N*sizeof(double));
//cudaMemcpy(dev_G2, G,N*N*sizeof(double), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&dev_B2, N*N*sizeof(double));
//cudaMemcpy(dev_B2, B,N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_Pg, N*sizeof(double));
cudaMemcpy(dev_Pg, Pg,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_Qg, N*sizeof(double));
cudaMemcpy(dev_Qg, Qg,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_Pl, N*sizeof(double));
cudaMemcpy(dev_Pl, Pl,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_Ql, N*sizeof(double));
cudaMemcpy(dev_Ql, Ql,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_V, N*sizeof(double));
//cudaMemcpy(dev_V, V,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_FunctoNode, 2*N*sizeof(int));
cudaMemcpy(dev_FunctoNode, FunctoNode,2*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_angle, N*sizeof(double));
cudaMemcpy(dev_angle, ang,N*sizeof(double), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&dev_nodeAng, N*sizeof(int));
//cudaMemcpy(dev_nodeAng, nodeAng,N*sizeof(int), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&dev_pfplus,nfunc * sizeof(double));
//cudaMalloc((void**)&dev_pfminus,nfunc * sizeof(double));
cudaMalloc((void**)&dev_fx1,nfunc * sizeof(double));
cudaMalloc((void**)&dev_fx2,nfunc * sizeof(double));
cudaMalloc((void**)&dev_pf,nfunc * sizeof(double));
cudaMalloc((void**)&dev_cntI,2*n * sizeof(double));
cudaMalloc((void**)&dev_NodetoFuncP,N * sizeof(int));
cudaMemcpy(dev_NodetoFuncP,NodetoFuncP,N*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_NodetoFuncQ,N * sizeof(int));
cudaMemcpy(dev_NodetoFuncQ,NodetoFuncQ,N*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_type,N * sizeof(int));
cudaMemcpy(dev_type,type,N*sizeof(int), cudaMemcpyHostToDevice);
double *dev_delt;
cudaMalloc((void**)&dev_delt, sizeof(double));
double *fxzeros = (double*)malloc(2*N*sizeof(double));
for(int i=0;i<2*N;i++){
fxzeros[i]=0;
}
//dim3 threadsPerBlock(256);
//dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
int threads=256;
int blocksformY=(lineN+N)/threads+1;
//cudaSetDeviceFlags(cudaDeviceBlockingSync);
//cudaThreadSynchronize();
t=0;
double tmax=50;
//double delt;
//cudaEvent_t start,stop;
double *deltat = (double*)malloc(sizeof(double));
*deltat=0.01;
cudaMemcpy(dev_delt, deltat,sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_V, V,N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_angle, ang,N*sizeof(double), cudaMemcpyHostToDevice);
LARGE_INTEGER t1,t2,tc;
QueryPerformanceFrequency(&tc);
QueryPerformanceCounter(&t1);
LARGE_INTEGER ts,te;
formYKernel<<<blocksformY,threads>>>(dev_G,dev_B,dev_lineN,dev_from,dev_to,dev_r,dev_x,dev_c,dev_tr,dev_g1,dev_b1);
cudaThreadSynchronize();
QueryPerformanceCounter(&t2);
tformY+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
double *G=(double*)malloc((lineN+N)*sizeof(double));
cudaMemcpy(G, dev_G,(lineN+N)*sizeof(double), cudaMemcpyDeviceToHost);
double *B=(double*)malloc((lineN+N)*sizeof(double));
cudaMemcpy(B, dev_B,(lineN+N)*sizeof(double), cudaMemcpyDeviceToHost);
lineN=mergeY(from,to,G,B,lineN,N);
cudaMemcpy(dev_G, G,(lineN+N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, B,(lineN+N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_lineN,&lineN,sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_from,from,lineN*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_to,to,lineN*sizeof(int), cudaMemcpyHostToDevice);
//for(int i=0;i<lineN+N;i++)
// if(i<lineN)
// printf("%d %d %f %f\n",from[i],to[i],G[i],B[i]);
// else
// printf("%d %d %f %f\n",i-lineN,i-lineN,G[i],B[i]);
double *J=(double*)malloc((N*N)*sizeof(double));
int *Ji=(int*)malloc((N*N)*sizeof(int));
int *Jj=(int*)malloc((N*N)*sizeof(int));
//for(int i=0;i<2*(N-1);i++)
// printf("%f ",cntI_all[i]);
// printf("formY Time:%f ms\n",(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart);
free(R);
free(X);
free(C);
free(tr);
free(shift);
//// part X://
//if((fp=fopen("csrJ13659.txt","rt+"))==NULL){
// printf("Cannot open file strike any key exit!");
// getch();
// exit(1);
// }
//int nnz;
//fscanf(fp,"%d %d",&nfunc,&nnz);
//for(int i=0;i<nnzJ;i++)
// printf("%f ",val[i]);
//printf("\n");
//for(int i=0;i<nnzJ;i++)
// printf("%d ",colind[i]);
//printf("\n");
//for(int i=0;i<nfunc+1;i++)
// printf("%d ",rowptr[i]);
//
	double *angle0 = (double*)malloc(N*sizeof(double)); // hold the full angle vector: angle0[FunctoNode[0]] is read below
double *anglelast = (double*)malloc(sizeof(double));
*anglelast=0;
//cublasHandle_t cublasHandle = NULL;
//cublasCreate(&cublasHandle);
const double alpha=1.0,beta=0.0;
int blocksode=nfunc/threads+1;
//QueryPerformanceCounter(&t22);
//tanaly+=(t22.QuadPart - t11.QuadPart)*1000.0/tc.QuadPart;
//
double *d_z;
cudaMalloc((void**)&d_z, nfunc * sizeof(double));
double *test=(double*)malloc(nfunc*sizeof(double));
double *pf=(double*)malloc(nfunc*sizeof(double));
double *permuPf=(double*)malloc(nfunc*sizeof(double));
//LARGE_INTEGER t3,t4,tstart,tend;
//QueryPerformanceFrequency(&tc);
// QueryPerformanceCounter(&t3);
int blocksPQ=(2*n)/threads+1;
int blockscntI=(N+lineN)/threads+1;
double *d_Ptot,*d_Qtot;
cudaMalloc((void**)&d_Ptot, n * sizeof(double));
cudaMalloc((void**)&d_Qtot, n * sizeof(double));
double *Ptot=(double*)malloc(N*sizeof(double));
double *Qtot=(double*)malloc(N*sizeof(double));
int nnzJ;
_handle_t solver = NULL;
_uint_t i;
_double_t *cfg;
const _double_t *stat;
if (__FAIL(NicsLU_Initialize(&solver, &cfg, &stat)))
{
printf("Failed to initialize\n");
system("pause");
return -1;
}
int* rowptrJ = (int *)malloc((nfunc+1)*sizeof(int));
unsigned int* rowptr = (unsigned int *)malloc((nfunc+1)*sizeof(unsigned int));
double *d_valL;
int *d_colindL,*d_rowptrL;
cudaError_t err;
err=cudaMalloc((void**)&d_rowptrL, (nfunc+1)*sizeof(int));
double *d_valU;
int *d_colindU,*d_rowptrU;
err=cudaMalloc((void**)&d_rowptrU, (nfunc+1)*sizeof(int));
//float time_elapsed=0;
//cudaEvent_t start,stop;
	//cudaEventCreate(&start); // create the Event
// cudaEventCreate(&stop);
double *fx1=(double *)malloc(nfunc*sizeof(double));
double *fx2=(double *)malloc(nfunc*sizeof(double));
QueryPerformanceCounter(&ts);
while (t<tmax){
cudaMemcpy(dev_cntI, fxzeros,2*n*sizeof(double), cudaMemcpyHostToDevice);
/*QueryPerformanceCounter(&tstart);*/
//time_elapsed=0;
//cudaEventRecord( start,0);
calc_cntI<<<blockscntI,threads>>>(dev_cntI,dev_lineN,dev_from,dev_to,dev_G,dev_B,dev_V,dev_angle,dev_NodetoFuncP,dev_type);
cudaThreadSynchronize();
//cudaMemcpy(test, dev_cntI,nfunc*sizeof(double), cudaMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f ",test[i]);
calc_PQ<<<blocksPQ,threads>>>(d_Ptot,d_Qtot,dev_V,dev_cntI,dev_FunctoNode);
cudaThreadSynchronize();
calc_pf<<<blocksode,threads>>>(dev_pf,d_Ptot,d_Qtot,dev_FunctoNode,dev_NodetoFuncP,dev_Pg,dev_Qg,dev_Pl,dev_Ql);
// cudaThreadSynchronize();
cudaMemcpy(pf, dev_pf,nfunc*sizeof(double), cudaMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f\n",pf[i]);
	//cudaEventRecord( stop,0); // record the current time
//cudaEventSynchronize(start); //Waits for an event to complete.
	//cudaEventSynchronize(stop); //Waits for an event to complete (the work recorded before this point).
	//cudaEventElapsedTime(&time_elapsed,start,stop); // compute the elapsed time
//tpf+=time_elapsed;
cudaMemcpy(Ptot, d_Ptot,n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(Qtot, d_Qtot,n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(V, dev_V,N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(ang, dev_angle,N*sizeof(double), cudaMemcpyDeviceToHost);
QueryPerformanceCounter(&t1);
nnzJ=formJ(Ji,Jj,J,from,to,G,B,V,ang,Ptot,Qtot,N-1,nPV,lineN,NodetoFuncP,NodetoFuncQ,FunctoNode,type);
double* val = (double *)malloc(nnzJ*sizeof(double));
int* colindJ = (int *)malloc(nnzJ*sizeof(int));
coo2csr(nfunc,nnzJ,J,Ji,Jj,val,colindJ,rowptrJ);
QueryPerformanceCounter(&t2);
tformJ+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
unsigned int* colind = (unsigned int *)malloc(nnzJ*sizeof(unsigned int));
for(int i=0;i<nnzJ;i++)
colind[i]=(unsigned int)colindJ[i];
for(int i=0;i<nfunc+1;i++)
rowptr[i]=(unsigned int)rowptrJ[i];
//for(int i=0;i<nnzJ;i++)
// printf("%f ",val[i]);
//printf("\n");
//for(int i=0;i<nnzJ;i++)
// printf("%d ",colind[i]);
//printf("\n");
//for(int i=0;i<nfunc+1;i++)
// printf("%d ",rowptr[i]);
QueryPerformanceFrequency(&tc);
QueryPerformanceCounter(&t1);
// printf("Version %.0lf\nLicense to %.0lf\n", stat[31], stat[29]);
int cao=NicsLU_Analyze(solver, nfunc, val, colind, rowptr, MATRIX_ROW_REAL, NULL, NULL);
int cao2=NicsLU_Factorize(solver, val, 1);
//for(int i=0;i<20;i++)
// printf("%f ",stat[i]);
QueryPerformanceCounter(&t2);
tLU+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
QueryPerformanceCounter(&t1);
NicsLU_Solve(solver, pf, fx1);
QueryPerformanceCounter(&t2);
tsolve+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
cudaMemcpy(dev_fx1, fx1,nfunc*sizeof(double), cudaMemcpyHostToDevice);
//cudaMemcpy(test, dev_fx1,nfunc*sizeof(double), cudaMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("%f\n",test[i]);
//cudaEventRecord( start,0);
changeVAng1<<<blocksode,threads>>>(dev_V,dev_angle,dev_FunctoNode,dev_delt,dev_fx1);
	//cudaEventRecord( stop,0); // record the current time
//cudaEventSynchronize(start); //Waits for an event to complete.
	//cudaEventSynchronize(stop); //Waits for an event to complete (the work recorded before this point).
	//cudaEventElapsedTime(&time_elapsed,start,stop); // compute the elapsed time
//tchange+=time_elapsed;
cudaMemcpy(dev_cntI, fxzeros,nfunc*sizeof(double), cudaMemcpyHostToDevice);
calc_cntI<<<blockscntI,threads>>>(dev_cntI,dev_lineN,dev_from,dev_to,dev_G,dev_B,dev_V,dev_angle,dev_NodetoFuncP,dev_type);
cudaThreadSynchronize();
//cudaMemcpy(test, dev_cntI,nfunc*sizeof(double), cudaMemcpyDeviceToHost);
//for (int i=0;i<nfunc;i++)
// printf("cntI[%d]=%f\n",i,test[i]);
calc_PQ<<<blocksPQ,threads>>>(d_Ptot,d_Qtot,dev_V,dev_cntI,dev_FunctoNode);
cudaThreadSynchronize();
calc_pf<<<blocksode,threads>>>(dev_pf,d_Ptot,d_Qtot,dev_FunctoNode,dev_NodetoFuncP,dev_Pg,dev_Qg,dev_Pl,dev_Ql);
// cudaThreadSynchronize();
cudaMemcpy(pf, dev_pf,nfunc*sizeof(double), cudaMemcpyDeviceToHost);
QueryPerformanceCounter(&t1);
NicsLU_Solve(solver, pf, fx2);
QueryPerformanceCounter(&t2);
tsolve+=(t2.QuadPart - t1.QuadPart)*1000.0/tc.QuadPart;
cudaMemcpy(dev_fx2, fx2,nfunc*sizeof(double), cudaMemcpyHostToDevice);
changeVAng2<<<blocksode,threads>>>(dev_V,dev_angle,dev_FunctoNode,dev_delt,dev_fx1,dev_fx2);
	cudaMemcpy(angle0, dev_angle,N*sizeof(double), cudaMemcpyDeviceToHost);
t=t+*deltat;
printf("t= %f angle[1]=%f\n",t,angle0[FunctoNode[0]]);
if(abs(angle0[FunctoNode[0]]-*anglelast)<0.0001) break;
*anglelast=angle0[FunctoNode[0]];
}
QueryPerformanceCounter(&te);
ttotal+=(te.QuadPart - ts.QuadPart)*1000.0/tc.QuadPart;
//QueryPerformanceCounter(&t4);
// printf("solve total Time:%f ms\n",(t4.QuadPart - t3.QuadPart)*1000.0/tc.QuadPart);
//double time=(double) (stop-start);
//printf("time = %f ",time);
free(fx1);
free(fx2);
free(from);
free(to);
free(V);
free(ang);
free(Pg);
free(Qg);
free(Pl);
free(Ql);
free(GG);
free(BB);
free(type);
free(node);
free(NodetoFuncP);
free(NodetoFuncQ);
free(FunctoNode);
free(J);
free(Ji);
free(Jj);
NicsLU_Free(solver);
cudaFree(dev_fx1);
cudaFree(dev_fx2);
cudaFree(dev_r);
cudaFree(dev_x);
cudaFree(dev_c);
cudaFree(dev_tr);
cudaFree(dev_b1);
cudaFree(dev_g1);
//cudaFree(dev_k);
//cudaFree(dev_delt);
//cudaFree(fxplus);
//cudaFree(fxminus);
cudaFree(d_valL);
cudaFree(d_rowptrL);
cudaFree(d_colindL);
cudaFree(d_valU);
cudaFree(d_rowptrU);
cudaFree(d_colindU);
//cudaFree(dev_G2);
//cudaFree(dev_B2);
cudaFree(dev_Pg);
cudaFree(dev_Qg);
cudaFree(dev_Pl);
cudaFree(dev_Ql);
//cudaFree(dev_pfplus);
//cudaFree(dev_pfminus);
cudaFree(dev_V);
cudaFree(dev_angle);
cudaFree(dev_FunctoNode);
cudaFree(dev_type);
cudaFree(dev_NodetoFuncP);
cudaFree(dev_NodetoFuncQ);
	//cudaFree(dev_type); // duplicate free: dev_type is already freed above
cudaFree(dev_cntI);
cudaFree(dev_lineN);
cudaFree(dev_from);
cudaFree(dev_to);
cudaFree(dev_G);
cudaFree(dev_B);
//cudaEventDestroy(start); //destory the event
//cudaEventDestroy(stop);
}
printf("iteration times: %f\n",t);
printf("formY Time:%f ms\n",tformY/iteration);
printf("formJ Time:%f ms\n",tformJ/iteration);
	printf("J LU factorization Time:%f ms\n",tLU/iteration);
printf("solve Time:%f ms\n",tsolve/iteration);
printf("total time:%f ms\n",ttotal/iteration);
//free(val);
//free(rowptr);
//free(colind);
//free(valU);
//free(rowptrU);
//free(colindU);
//free(inimax);
//free(delt);
//getchar();
//free(G);
//free(B);
return 0;
} |
b634db8ab9254a1a461c7b7fd5869396442f508b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "helper/inc/helper_cuda.h"
#include "helper/inc/helper_functions.h"
#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))
#define MASK_COLS 3
#define MASK_ROWS 3
// Image to perform convolution on
const char *imageFilename = "data/lena_bw.pgm";
// Loaded mask in constant memory
__constant__ float mask[MASK_ROWS*MASK_COLS];
// Global Kernel
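// Tiled 2-D convolution: each BLOCK_W x BLOCK_H block loads a TILE_W x TILE_H
// tile plus an R-pixel halo into shared memory with clamp-to-edge addressing,
// then the interior threads apply the 3x3 constant-memory mask to their
// shared-memory neighbourhood. Note that bindex uses blockDim.y as the row
// stride, which only works because the block is square (BLOCK_W == BLOCK_H).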
__global__ void convolution(float* dData, float* dResult, unsigned int width, unsigned int height){
__shared__ float smem[BLOCK_W*BLOCK_H];
int x = blockIdx.x * TILE_W + threadIdx.x - R;
int y = blockIdx.y * TILE_H + threadIdx.y - R;
// Image edges
x = max(0, x);
x = min(x, width-1);
y = max(y, 0);
y = min(y, height-1);
unsigned int index = y*width + x;
unsigned int bindex = threadIdx.y * blockDim.y + threadIdx.x;
smem[bindex] = dData[index];
__syncthreads();
// if (((threadIdx.x >= R) && (threadIdx.x < BLOCK_W-R)) && ((threadIdx.y>=R) && (threadIdx.y<=BLOCK_H-R))){
// float sum = 0;
// for(int dy=-R;dy<=R;dy++){
// for(int dx=-R;dx<R;dx++){
// float i = smem[bindex+(dy*blockDim.x)+dx];
// sum +=i;
// }
// }
// dResult[index] = sum/S;
// }
// dResult[index] = dData[index];
if (((threadIdx.x >= R) && (threadIdx.x < BLOCK_W-R)) && ((threadIdx.y>=R) && (threadIdx.y<=BLOCK_H-R))){
float sum = 0;
// Iterate over mask rows
for(int i = 0; i<MASK_ROWS; i++){
//Iterate over mask cols
for(int j = 0; j<MASK_COLS; j++){
sum += smem[bindex+(i*blockDim.x)+j] * mask[i*3+j];
}
}
dResult[index] = sum;
}
}
int main(void){
// Set mask in constant memory
// Edge Filter
// float constant_mem_mask[MASK_ROWS*MASK_COLS]= {-1, 0, 1, -2, 0, 2, -1, 0, 1};
// Sharpening Filter
float constant_mem_mask[MASK_ROWS*MASK_COLS]= {-1, -1, -1, -1, 9, -1, -1, -1, -1};
// Averaging Filter
// float constant_mem_mask[MASK_ROWS*MASK_COLS]= {1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9};
//Get the device properties
int devID = findCudaDevice(0, 0);
hipGetDeviceProperties(0, 0);
// Image on host
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, 0);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// Allocate space for image on device
float *dData = NULL;
checkCudaErrors(hipMalloc((void **) &dData, size));
checkCudaErrors(hipMemcpy(dData, hData, size, hipMemcpyHostToDevice));
// Allocate memory for the resulting image on device
float *dResult = NULL;
checkCudaErrors(hipMalloc((void **) &dResult, size));
hipMemcpyToSymbol(mask, &constant_mem_mask, MASK_ROWS*MASK_COLS*sizeof(float));
// Timing using Cuda Events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Perform work on GPU...
// Set the grid and block dimensions
// Set max threads <1024
int threads = 32;
// Set enough blocks equal to the DIM of your array
int blocks = (width+threads-1)/threads;
dim3 dimGrid(100, 100);
dim3 dimBlock(BLOCK_W, BLOCK_H);
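// Each block loads a BLOCK_W x BLOCK_H region into shared memory: the TILE_W x TILE_H output tile plus an R-pixel halo on every side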
hipLaunchKernelGGL(( convolution), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, dResult, height, width);
hipEventRecord(stop,0);
hipEventSynchronize( stop );
float elapseTime;
hipEventElapsedTime(&elapseTime, start, stop);
float throughput = (width*height/((elapseTime*1000)*(1e9)));
printf( "GPU Global Mem Throughput: %3.6f ms\n", throughput);
printf( "GPU Global Mem Time elpased: %3.6f ms\n", elapseTime );
hipEventDestroy( start );
hipEventDestroy( stop );
//Copy the resulting image back to the host
float *hResult = (float *)malloc(size);
checkCudaErrors(hipMemcpy(hResult, dResult, size, hipMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_GPU_out_SM.pgm");
sdkSavePGM(outputFilename, hResult, width, height);
printf("Wrote '%s'\n", outputFilename);
free(hResult);
hipDeviceReset();
} | b634db8ab9254a1a461c7b7fd5869396442f508b.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include "helper/inc/helper_cuda.h"
#include "helper/inc/helper_functions.h"
#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))
#define MASK_COLS 3
#define MASK_ROWS 3
// Image to perform convolution on
const char *imageFilename = "data/lena_bw.pgm";
// Loaded mask in constant memory
__constant__ float mask[MASK_ROWS*MASK_COLS];
// Global Kernel
__global__ void convolution(float* dData, float* dResult, unsigned int width, unsigned int height){
__shared__ float smem[BLOCK_W*BLOCK_H];
int x = blockIdx.x * TILE_W + threadIdx.x - R;
int y = blockIdx.y * TILE_H + threadIdx.y - R;
// Image edges
x = max(0, x);
x = min(x, width-1);
y = max(y, 0);
y = min(y, height-1);
unsigned int index = y*width + x;
unsigned int bindex = threadIdx.y * blockDim.y + threadIdx.x;
smem[bindex] = dData[index];
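// Wait for the whole block to finish loading its pixels into shared memory before any thread reads its neighbours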
__syncthreads();
// if (((threadIdx.x >= R) && (threadIdx.x < BLOCK_W-R)) && ((threadIdx.y>=R) && (threadIdx.y<=BLOCK_H-R))){
// float sum = 0;
// for(int dy=-R;dy<=R;dy++){
// for(int dx=-R;dx<R;dx++){
// float i = smem[bindex+(dy*blockDim.x)+dx];
// sum +=i;
// }
// }
// dResult[index] = sum/S;
// }
// dResult[index] = dData[index];
if (((threadIdx.x >= R) && (threadIdx.x < BLOCK_W-R)) && ((threadIdx.y>=R) && (threadIdx.y<=BLOCK_H-R))){
float sum = 0;
// Iterate over mask rows
for(int i = 0; i<MASK_ROWS; i++){
//Iterate over mask cols
for(int j = 0; j<MASK_COLS; j++){
sum += smem[bindex+(i*blockDim.x)+j] * mask[i*3+j];
}
}
dResult[index] = sum;
}
}
int main(void){
// Set mask in constant memory
// Edge Filter
// float constant_mem_mask[MASK_ROWS*MASK_COLS]= {-1, 0, 1, -2, 0, 2, -1, 0, 1};
// Sharpening Filter
float constant_mem_mask[MASK_ROWS*MASK_COLS]= {-1, -1, -1, -1, 9, -1, -1, -1, -1};
// Averaging Filter
// float constant_mem_mask[MASK_ROWS*MASK_COLS]= {1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9, 1/9};
//Get the device properties
int devID = findCudaDevice(0, 0);
cudaGetDeviceProperties(0, 0);
// Image on host
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, 0);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// Allocate space for image on device
float *dData = NULL;
checkCudaErrors(cudaMalloc((void **) &dData, size));
checkCudaErrors(cudaMemcpy(dData, hData, size, cudaMemcpyHostToDevice));
// Allocate memory for the resulting image on device
float *dResult = NULL;
checkCudaErrors(cudaMalloc((void **) &dResult, size));
cudaMemcpyToSymbol(mask, &constant_mem_mask, MASK_ROWS*MASK_COLS*sizeof(float));
// Timing using Cuda Events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Perform work on GPU...
// Set the grid and block dimensions
// Set max threads <1024
int threads = 32;
// Set enough blocks equal to the DIM of your array
int blocks = (width+threads-1)/threads;
dim3 dimGrid(100, 100);
dim3 dimBlock(BLOCK_W, BLOCK_H);
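// Each block loads a BLOCK_W x BLOCK_H region into shared memory: the TILE_W x TILE_H output tile plus an R-pixel halo on every side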
convolution<<<dimGrid, dimBlock>>>(dData, dResult, height, width);
cudaEventRecord(stop,0);
cudaEventSynchronize( stop );
float elapseTime;
cudaEventElapsedTime(&elapseTime, start, stop);
float throughput = (width*height/((elapseTime*1000)*(1e9)));
printf( "GPU Global Mem Throughput: %3.6f ms\n", throughput);
printf( "GPU Global Mem Time elpased: %3.6f ms\n", elapseTime );
cudaEventDestroy( start );
cudaEventDestroy( stop );
//Copy the resulting image back to the host
float *hResult = (float *)malloc(size);
checkCudaErrors(cudaMemcpy(hResult, dResult, size, cudaMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_GPU_out_SM.pgm");
sdkSavePGM(outputFilename, hResult, width, height);
printf("Wrote '%s'\n", outputFilename);
free(hResult);
cudaDeviceReset();
} |
f0326be74f5ca42fb0321cd66d2a726e24233392.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//STL
#include <stdio.h>
__global__ void childKernel()
{
printf( "Hello %d", threadIdx.x );
}
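// parentKernel demonstrates dynamic parallelism: it launches childKernel from device code and waits for it to finish before printing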
__global__ void parentKernel()
{
hipLaunchKernelGGL(( childKernel), dim3(1), dim3(2) , 0, 0, );
hipDeviceSynchronize();
printf( "World!\n" );
}
int main( int argc, char *argv[] )
{
hipLaunchKernelGGL(( parentKernel), dim3(1), dim3(2) , 0, 0, );
hipDeviceSynchronize();
return 0;
}
| f0326be74f5ca42fb0321cd66d2a726e24233392.cu | //STL
#include <stdio.h>
__global__ void childKernel()
{
printf( "Hello %d", threadIdx.x );
}
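// parentKernel demonstrates dynamic parallelism: it launches childKernel from device code and waits for it to finish before printing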
__global__ void parentKernel()
{
childKernel<<< 1, 2 >>>();
cudaDeviceSynchronize();
printf( "World!\n" );
}
int main( int argc, char *argv[] )
{
parentKernel<<< 1, 2 >>>();
cudaDeviceSynchronize();
return 0;
}
|
fb61b3c9c6256d30b325396e74dc11d5a49b02eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
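// Applies pow(a, b) element-wise to one triangle of a column-major matrix: the sign of bottom decides whether
// the lower or upper triangle is processed, and when unit == 132 the diagonal is skipped (strict inequality)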
__global__ void uplo_powx (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, const REAL b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
c[offset_c + gid_0 + gid_1 * ld_c] = CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], b);
}
} | fb61b3c9c6256d30b325396e74dc11d5a49b02eb.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
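// Applies pow(a, b) element-wise to one triangle of a column-major matrix: the sign of bottom decides whether
// the lower or upper triangle is processed, and when unit == 132 the diagonal is skipped (strict inequality)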
__global__ void uplo_powx (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, const REAL b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
c[offset_c + gid_0 + gid_1 * ld_c] = CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], b);
}
} |
45fbc8830ac4dc9a6177987176511120904167c5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include <tests/utilities/tuple_vectors.h>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <tests/utilities/cudf_test_fixtures.h>
#include <join/joining.h>
#include <join/join_compute_api.h>
#include <utilities/bit_util.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <cstdlib>
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << ", " << p.second;
std::cout << "\n";
return os;
}
}
// A new instance of this class will be created for each *TEST(JoinTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct JoinTest : public GdfTest
{
// The join type is passed via a member of the template argument class
const join_op op = test_parameters::op;
gdf_context ctxt = {
test_parameters::join_type == gdf_method::GDF_SORT,
test_parameters::join_type,
0
};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be joined, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
multi_column_t left_columns;
multi_column_t right_columns;
// valids for multi_columns
std::vector<host_valid_pointer> left_valids;
std::vector<host_valid_pointer> right_valids;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_join
// functions. unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_left_columns;
std::vector<gdf_col_pointer> gdf_right_columns;
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
JoinTest()
{
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~JoinTest()
{
}
/* --------------------------------------------------------------------------*
* @brief Creates a unique_ptr that wraps a gdf_column structure
* initialized with a host vector
*
* @param host_vector vector containing data to be transferred to device side column
* @param host_valid vector containing valid masks associated with the supplied vector
* @param n_count null_count to be set for the generated column
*
* @returns A unique_ptr wrapping the new gdf_column
* --------------------------------------------------------------------------*/
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, gdf_valid_type* host_valid,
const gdf_size_type n_count)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type;
if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will
// free the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col) {
col->size = 0;
RMM_FREE(col->data, 0);
RMM_FREE(col->valid, 0);
};
gdf_col_pointer the_column{new gdf_column{}, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(hipMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), hipMemcpyHostToDevice), hipSuccess);
// Allocate device storage for gdf_column.valid
if (host_valid != nullptr) {
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), gdf_valid_allocation_size(host_vector.size()), 0), RMM_SUCCESS);
EXPECT_EQ(hipMemcpy(the_column->valid, host_valid, gdf_num_bitmask_elements(host_vector.size()), hipMemcpyHostToDevice), hipSuccess);
the_column->null_count = n_count;
} else {
the_column->valid = nullptr;
the_column->null_count = 0;
}
// Fill the gdf_column members
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info{TIME_UNIT_NONE};
the_column->dtype_info = extra_info;
return the_column;
}
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
if (valids.size() != 0) {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), valids[I].get(), n_count));
} else {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), nullptr, n_count));
}
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t, valids, n_count);
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns, std::vector<host_valid_pointer>& valids,
const gdf_size_type n_count)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns, valids, n_count);
return gdf_columns;
}
/* --------------------------------------------------------------------------*
* @brief Initializes two sets of columns, left and right, with random
* values for the join operation.
*
* @param left_column_length The length of the left set of columns
* @param left_column_range The upper bound of random values for the left
* columns. Values are [0, left_column_range)
* @param right_column_length The length of the right set of columns
* @param right_column_range The upper bound of random values for the right
* columns. Values are [0, right_column_range)
* @param print Optionally print the left and right set of columns for debug
* -------------------------------------------------------------------------*/
void create_input( size_t left_column_length, size_t left_column_range,
size_t right_column_length, size_t right_column_range,
bool print = false, const gdf_size_type n_count = 0)
{
initialize_tuple(left_columns, left_column_length, left_column_range, static_cast<size_t>(ctxt.flag_sorted));
initialize_tuple(right_columns, right_column_length, right_column_range, static_cast<size_t>(ctxt.flag_sorted));
auto n_columns = std::tuple_size<multi_column_t>::value;
initialize_valids(left_valids, n_columns, left_column_length, 0);
initialize_valids(right_valids, n_columns, right_column_length, 0);
gdf_left_columns = initialize_gdf_columns(left_columns, left_valids, n_count);
gdf_right_columns = initialize_gdf_columns(right_columns, right_valids, n_count);
// Fill vector of raw pointers to gdf_columns
gdf_raw_left_columns.clear();
gdf_raw_right_columns.clear();
for(auto const& c : gdf_left_columns){
gdf_raw_left_columns.push_back(c.get());
}
for(auto const& c : gdf_right_columns){
gdf_raw_right_columns.push_back(c.get());
}
if(print)
{
std::cout << "Left column(s) created. Size: " << std::get<0>(left_columns).size() << std::endl;
print_tuples_and_valids(left_columns, left_valids);
std::cout << "Right column(s) created. Size: " << std::get<0>(right_columns).size() << std::endl;
print_tuples_and_valids(right_columns, right_valids);
}
}
/* --------------------------------------------------------------------------*
* @brief Creates two gdf_columns with size 1 data buffer allocations, but
* with a specified `size` attribute
*
* @param left_column_length The length of the left column
* @param right_column_length The length of the right column
* -------------------------------------------------------------------------*/
void create_dummy_input( gdf_size_type const left_column_length,
gdf_size_type const right_column_length)
{
using col_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
// Only allocate a single element
std::vector<col_type> dummy_vector_left(1, static_cast<col_type>(0));
std::vector<col_type> dummy_vector_right(1, static_cast<col_type>(0));
gdf_left_columns.push_back(create_gdf_column<col_type>(dummy_vector_left, nullptr, 0));
gdf_right_columns.push_back(create_gdf_column<col_type>(dummy_vector_right, nullptr, 0));
// Fill vector of raw pointers to gdf_columns
for (auto const& c : gdf_left_columns) {
c->size = left_column_length;
gdf_raw_left_columns.push_back(c.get());
}
for (auto const& c : gdf_right_columns) {
c->size = right_column_length;
gdf_raw_right_columns.push_back(c.get());
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes a reference solution for joining the left and right sets of columns
*
* @param print Option to print the solution for debug
* @param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(bool print = false, bool sort = true)
{
// Use the type of the first vector as the key_type
using key_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = std::get<0>(right_columns);
auto build_valid = right_valids[0].get();
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
if (gdf_is_valid(build_valid, right_index)) {
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = std::get<0>(left_columns);
auto probe_valid = left_valids[0].get();
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
if (gdf_is_valid(probe_valid, left_index)) {
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
// If all of the columns in right_columns[right_index] == all of the columns in left_columns[left_index]
// Then this index pair is added to the result as a matching pair of row indices
if( true == rows_equal_using_valids(left_columns, right_columns, left_valids, right_valids, left_index, right_index)){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
}
// For left joins, insert a NULL if no match is found
if((false == match) &&
((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
if (gdf_is_valid(probe_valid, left_index)) {
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()) || (!gdf_is_valid(build_valid, right_index)))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "Reference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @param gdf_result A vector of result_type that holds the result of the libgdf join function
* @param print Option to print the result computed by the libgdf function
* @param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
gdf_error result_error{GDF_SUCCESS};
gdf_column ** left_gdf_columns = gdf_raw_left_columns.data();
gdf_column ** right_gdf_columns = gdf_raw_right_columns.data();
std::vector<int> range;
for (int i = 0; i < num_columns; ++i) {range.push_back(i);}
switch(op)
{
case join_op::LEFT:
{
result_error = gdf_left_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::INNER:
{
result_error = gdf_inner_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::FULL:
{
result_error = gdf_full_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(expected_result, result_error) << "The gdf join function did not complete successfully";
// If the expected result was not GDF_SUCCESS, then this test was testing for a
// specific error condition, in which case we return immediately and do not do
// any further work on the output
if(GDF_SUCCESS != expected_result){
return std::vector<result_type>();
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(hipMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
EXPECT_EQ(hipMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "GDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
};
// This structure is used to nest the join operations, join method and
// number/types of columns for use with Google Test type-parameterized
// tests. Here join_operation refers to the type of join, e.g. INNER,
// LEFT, or FULL, and join_method refers to the underlying join algorithm
// that performs it, e.g. GDF_HASH or GDF_SORT.
template<join_op join_operation,
gdf_method join_method,
typename tuple_of_vectors,
bool keys_are_unique = false>
struct TestParameters
{
// The method to use for the join
const static join_op op{join_operation};
// The method to use for the join
const static gdf_method join_type{join_method};
// The tuple of vectors that determines the number and types of the columns to join
using multi_column_t = tuple_of_vectors;
const static bool unique_keys{keys_are_unique};
};
const static gdf_method HASH = gdf_method::GDF_HASH;
const static gdf_method SORT = gdf_method::GDF_SORT;
template <typename... T>
using VTuple = std::tuple<std::vector<T>...>;
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(JoinTest, *) will be run once for every instance of
// TestParameters defined below
// The kind of join is determined by the first template argument to TestParameters
// The number and types of columns used in both the left and right sets of columns are
// determined by the number and types of vectors in the std::tuple<...> that is the second
// template argument to TestParameters
typedef ::testing::Types<
// Single column inner join tests for all types
TestParameters< join_op::INNER, HASH, VTuple<int32_t > >,
TestParameters< join_op::INNER, HASH, VTuple<int64_t > >,
TestParameters< join_op::INNER, HASH, VTuple<float > >,
TestParameters< join_op::INNER, HASH, VTuple<double > >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint64_t> >,
TestParameters< join_op::INNER, SORT, VTuple<int32_t > >,
TestParameters< join_op::INNER, SORT, VTuple<int64_t > >,
TestParameters< join_op::INNER, SORT, VTuple<float > >,
TestParameters< join_op::INNER, SORT, VTuple<double > >,
TestParameters< join_op::INNER, SORT, VTuple<uint32_t> >,
TestParameters< join_op::INNER, SORT, VTuple<uint64_t> >,
// Single column left join tests for all types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<int64_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double > >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint64_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<int32_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<int64_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<float > >,
TestParameters< join_op::LEFT, SORT, VTuple<double > >,
TestParameters< join_op::LEFT, SORT, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<uint64_t> >,
// Single column full join tests for all types
TestParameters< join_op::FULL, HASH, VTuple<int32_t > >,
TestParameters< join_op::FULL, HASH, VTuple<int64_t > >,
TestParameters< join_op::FULL, HASH, VTuple<float > >,
TestParameters< join_op::FULL, HASH, VTuple<double > >,
TestParameters< join_op::FULL, HASH, VTuple<uint32_t> >,
TestParameters< join_op::FULL, HASH, VTuple<uint64_t> >,
// Two Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double , uint32_t, int64_t> >,
// Two Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::INNER, HASH, VTuple<double , uint32_t, int64_t> >,
// Four column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<float, uint32_t, double, int32_t> >,
// Four column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<double, float, int64_t, double> >,
// Five column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t, int32_t> >,
// Five column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t, float> >
> Implementations;
TYPED_TEST_CASE(JoinTest, Implementations);
// This test is used for debugging purposes and is disabled by default.
// The input sizes are small and a large amount of debug printing is enabled.
TYPED_TEST(JoinTest, DISABLED_DebugTest)
{
this->create_input(5, 2,
5, 2,
true);
std::vector<result_type> reference_result = this->compute_reference_solution(true);
std::vector<result_type> gdf_result = this->compute_gdf_result(true);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EqualValues)
{
this->create_input(100,1,
1000,1);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, MaxRandomValues)
{
this->create_input(10000,RAND_MAX,
10000,RAND_MAX);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, LeftColumnsBigger)
{
this->create_input(10000,100,
100,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, RightColumnsBigger)
{
this->create_input(100,100,
10000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyLeftFrame)
{
this->create_input(0,100,
1000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyRightFrame)
{
this->create_input(1000,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, BothFramesEmpty)
{
this->create_input(0,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// The below tests check correct reporting of missing valid pointer
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct JoinValidTest : public JoinTest<test_parameters>
{ };
using ValidTestImplementation = testing::Types< TestParameters< join_op::INNER, SORT, VTuple<int32_t >>,
TestParameters< join_op::LEFT , SORT, VTuple<int32_t >>,
TestParameters< join_op::FULL , SORT, VTuple<int32_t >> >;
TYPED_TEST_CASE(JoinValidTest, ValidTestImplementation);
TYPED_TEST(JoinValidTest, ReportValidMaskError)
{
this->create_input(1000,100,
100,100,
false, 1);
std::vector<result_type> gdf_result = this->compute_gdf_result(false, true, GDF_VALIDITY_UNSUPPORTED);
}
// The below tests are for testing inputs that are at or above the maximum input size possible
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct MaxJoinTest : public JoinTest<test_parameters>
{ };
// Only test for single column inputs for Inner and Left joins because these tests take a long time
using MaxImplementations = testing::Types< TestParameters< join_op::INNER, HASH, VTuple<int32_t >>,
TestParameters< join_op::LEFT, HASH, VTuple<int32_t >> >;
TYPED_TEST_CASE(MaxJoinTest, MaxImplementations);
TYPED_TEST(MaxJoinTest, InputTooLarge)
{
const gdf_size_type left_table_size = 100;
const gdf_size_type right_table_size =
static_cast<gdf_size_type>(std::numeric_limits<int>::max());
this->create_dummy_input(left_table_size, right_table_size);
const bool print_result{false};
const bool sort_result{false};
// We expect the function to fail when the input is this large
const gdf_error expected_error{GDF_COLUMN_SIZE_TOO_BIG};
std::vector<result_type> gdf_result = this->compute_gdf_result(print_result,
sort_result,
expected_error);
}
// These tests will only fail on a non-release build where `assert`s are enabled
#ifndef NDEBUG
TEST(HashTableSizeDeathTest, ZeroOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{0};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
TEST(HashTableSizeDeathTest, TooLargeOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{101};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
#endif
TEST(HashTableSizeTest, OverflowTest){
int const num_insertions{std::numeric_limits<int>::max()};
uint32_t occupancy{50};
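// With 50% occupancy the requested table size is about twice the number of insertions (the test expects
// 2 * INT_MAX), which no longer fits in a 32-bit int, so the size must be computed and returned as size_t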
size_t hash_table_size = compute_hash_table_size(num_insertions, occupancy);
size_t expected_size{ size_t{2} * std::numeric_limits<int>::max()};
ASSERT_TRUE(hash_table_size > num_insertions);
EXPECT_EQ(expected_size, hash_table_size);
}
| 45fbc8830ac4dc9a6177987176511120904167c5.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include <tests/utilities/tuple_vectors.h>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <tests/utilities/cudf_test_fixtures.h>
#include <join/joining.h>
#include <join/join_compute_api.h>
#include <utilities/bit_util.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <cstdlib>
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << ", " << p.second;
std::cout << "\n";
return os;
}
}
// A new instance of this class will be created for each *TEST(JoinTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct JoinTest : public GdfTest
{
// The join type is passed via a member of the template argument class
const join_op op = test_parameters::op;
gdf_context ctxt = {
test_parameters::join_type == gdf_method::GDF_SORT,
test_parameters::join_type,
0
};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be joined, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
multi_column_t left_columns;
multi_column_t right_columns;
// valids for multi_columns
std::vector<host_valid_pointer> left_valids;
std::vector<host_valid_pointer> right_valids;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_join
// functions. unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_left_columns;
std::vector<gdf_col_pointer> gdf_right_columns;
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
JoinTest()
{
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~JoinTest()
{
}
/* --------------------------------------------------------------------------*
* @brief Creates a unique_ptr that wraps a gdf_column structure
* initialized with a host vector
*
* @param host_vector vector containing data to be transferred to device side column
* @param host_valid vector containing valid masks associated with the supplied vector
* @param n_count null_count to be set for the generated column
*
* @returns A unique_ptr wrapping the new gdf_column
* --------------------------------------------------------------------------*/
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, gdf_valid_type* host_valid,
const gdf_size_type n_count)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type;
if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will
// free the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col) {
col->size = 0;
RMM_FREE(col->data, 0);
RMM_FREE(col->valid, 0);
};
gdf_col_pointer the_column{new gdf_column{}, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), cudaMemcpyHostToDevice), cudaSuccess);
// Allocate device storage for gdf_column.valid
if (host_valid != nullptr) {
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), gdf_valid_allocation_size(host_vector.size()), 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemcpy(the_column->valid, host_valid, gdf_num_bitmask_elements(host_vector.size()), cudaMemcpyHostToDevice), cudaSuccess);
the_column->null_count = n_count;
} else {
the_column->valid = nullptr;
the_column->null_count = 0;
}
// Fill the gdf_column members
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info{TIME_UNIT_NONE};
the_column->dtype_info = extra_info;
return the_column;
}
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
if (valids.size() != 0) {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), valids[I].get(), n_count));
} else {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), nullptr, n_count));
}
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t, valids, n_count);
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns, std::vector<host_valid_pointer>& valids,
const gdf_size_type n_count)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns, valids, n_count);
return gdf_columns;
}
/* --------------------------------------------------------------------------*
* @brief Initializes two sets of columns, left and right, with random
* values for the join operation.
*
* @param left_column_length The length of the left set of columns
* @param left_column_range The upper bound of random values for the left
* columns. Values are [0, left_column_range)
* @param right_column_length The length of the right set of columns
* @param right_column_range The upper bound of random values for the right
* columns. Values are [0, right_column_range)
* @param print Optionally print the left and right set of columns for debug
* -------------------------------------------------------------------------*/
void create_input( size_t left_column_length, size_t left_column_range,
size_t right_column_length, size_t right_column_range,
bool print = false, const gdf_size_type n_count = 0)
{
initialize_tuple(left_columns, left_column_length, left_column_range, static_cast<size_t>(ctxt.flag_sorted));
initialize_tuple(right_columns, right_column_length, right_column_range, static_cast<size_t>(ctxt.flag_sorted));
auto n_columns = std::tuple_size<multi_column_t>::value;
initialize_valids(left_valids, n_columns, left_column_length, 0);
initialize_valids(right_valids, n_columns, right_column_length, 0);
gdf_left_columns = initialize_gdf_columns(left_columns, left_valids, n_count);
gdf_right_columns = initialize_gdf_columns(right_columns, right_valids, n_count);
// Fill vector of raw pointers to gdf_columns
gdf_raw_left_columns.clear();
gdf_raw_right_columns.clear();
for(auto const& c : gdf_left_columns){
gdf_raw_left_columns.push_back(c.get());
}
for(auto const& c : gdf_right_columns){
gdf_raw_right_columns.push_back(c.get());
}
if(print)
{
std::cout << "Left column(s) created. Size: " << std::get<0>(left_columns).size() << std::endl;
print_tuples_and_valids(left_columns, left_valids);
std::cout << "Right column(s) created. Size: " << std::get<0>(right_columns).size() << std::endl;
print_tuples_and_valids(right_columns, right_valids);
}
}
/* --------------------------------------------------------------------------*
* @brief Creates two gdf_columns with size 1 data buffer allocations, but
* with a specified `size` attribute
*
* @param left_column_length The length of the left column
* @param right_column_length The length of the right column
* -------------------------------------------------------------------------*/
void create_dummy_input( gdf_size_type const left_column_length,
gdf_size_type const right_column_length)
{
using col_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
// Only allocate a single element
std::vector<col_type> dummy_vector_left(1, static_cast<col_type>(0));
std::vector<col_type> dummy_vector_right(1, static_cast<col_type>(0));
gdf_left_columns.push_back(create_gdf_column<col_type>(dummy_vector_left, nullptr, 0));
gdf_right_columns.push_back(create_gdf_column<col_type>(dummy_vector_right, nullptr, 0));
// Fill vector of raw pointers to gdf_columns
for (auto const& c : gdf_left_columns) {
c->size = left_column_length;
gdf_raw_left_columns.push_back(c.get());
}
for (auto const& c : gdf_right_columns) {
c->size = right_column_length;
gdf_raw_right_columns.push_back(c.get());
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes a reference solution for joining the left and right sets of columns
*
* @param print Option to print the solution for debug
* @param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(bool print = false, bool sort = true)
{
// Use the type of the first vector as the key_type
using key_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = std::get<0>(right_columns);
auto build_valid = right_valids[0].get();
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
if (gdf_is_valid(build_valid, right_index)) {
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = std::get<0>(left_columns);
auto probe_valid = left_valids[0].get();
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
if (gdf_is_valid(probe_valid, left_index)) {
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
// If all of the columns in right_columns[right_index] == all of the columns in left_columns[left_index]
// Then this index pair is added to the result as a matching pair of row indices
if( true == rows_equal_using_valids(left_columns, right_columns, left_valids, right_valids, left_index, right_index)){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
}
// For left joins, insert a NULL if no match is found
if((false == match) &&
((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
if (gdf_is_valid(probe_valid, left_index)) {
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()) || (!gdf_is_valid(build_valid, right_index)))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "Reference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @param gdf_result A vector of result_type that holds the result of the libgdf join function
* @param print Option to print the result computed by the libgdf function
* @param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
gdf_error result_error{GDF_SUCCESS};
gdf_column ** left_gdf_columns = gdf_raw_left_columns.data();
gdf_column ** right_gdf_columns = gdf_raw_right_columns.data();
std::vector<int> range;
for (int i = 0; i < num_columns; ++i) {range.push_back(i);}
switch(op)
{
case join_op::LEFT:
{
result_error = gdf_left_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::INNER:
{
result_error = gdf_inner_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::FULL:
{
result_error = gdf_full_join(
left_gdf_columns, num_columns, range.data(),
right_gdf_columns, num_columns, range.data(),
num_columns,
0, nullptr,
&left_result, &right_result,
&ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(expected_result, result_error) << "The gdf join function did not complete successfully";
// If the expected result was not GDF_SUCCESS, then this test was testing for a
// specific error condition, in which case we return immediately and do not do
// any further work on the output
if(GDF_SUCCESS != expected_result){
return std::vector<result_type>();
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(cudaMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "GDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
};
// This structure is used to nest the join operations, join method and
// number/types of columns for use with Google Test type-parameterized
// tests. Here join_operation refers to the type of join, e.g. INNER,
// LEFT, FULL, and join_method refers to the underlying join algorithm
// that performs it, e.g. GDF_HASH or GDF_SORT.
template<join_op join_operation,
gdf_method join_method,
typename tuple_of_vectors,
bool keys_are_unique = false>
struct TestParameters
{
// The method to use for the join
const static join_op op{join_operation};
// The method to use for the join
const static gdf_method join_type{join_method};
// The tuple of vectors that determines the number and types of the columns to join
using multi_column_t = tuple_of_vectors;
const static bool unique_keys{keys_are_unique};
};
const static gdf_method HASH = gdf_method::GDF_HASH;
const static gdf_method SORT = gdf_method::GDF_SORT;
template <typename... T>
using VTuple = std::tuple<std::vector<T>...>;
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(JoinTest, *) will be run once for every instance of
// TestParameters defined below
// The kind of join is determined by the first template argument to TestParameters
// The number and types of columns used in both the left and right sets of columns are
// determined by the number and types of vectors in the std::tuple<...> that is the second
// template argument to TestParameters
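// For example, TestParameters< join_op::INNER, HASH, VTuple<int32_t, float> > would
// instantiate the inner hash-join tests on two join columns, one int32_t and one float.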
typedef ::testing::Types<
// Single column inner join tests for all types
TestParameters< join_op::INNER, HASH, VTuple<int32_t > >,
TestParameters< join_op::INNER, HASH, VTuple<int64_t > >,
TestParameters< join_op::INNER, HASH, VTuple<float > >,
TestParameters< join_op::INNER, HASH, VTuple<double > >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint64_t> >,
TestParameters< join_op::INNER, SORT, VTuple<int32_t > >,
TestParameters< join_op::INNER, SORT, VTuple<int64_t > >,
TestParameters< join_op::INNER, SORT, VTuple<float > >,
TestParameters< join_op::INNER, SORT, VTuple<double > >,
TestParameters< join_op::INNER, SORT, VTuple<uint32_t> >,
TestParameters< join_op::INNER, SORT, VTuple<uint64_t> >,
// Single column left join tests for all types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<int64_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double > >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint64_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<int32_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<int64_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<float > >,
TestParameters< join_op::LEFT, SORT, VTuple<double > >,
TestParameters< join_op::LEFT, SORT, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<uint64_t> >,
// Single column full join tests for all types
TestParameters< join_op::FULL, HASH, VTuple<int32_t > >,
TestParameters< join_op::FULL, HASH, VTuple<int64_t > >,
TestParameters< join_op::FULL, HASH, VTuple<float > >,
TestParameters< join_op::FULL, HASH, VTuple<double > >,
TestParameters< join_op::FULL, HASH, VTuple<uint32_t> >,
TestParameters< join_op::FULL, HASH, VTuple<uint64_t> >,
// Two Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double , uint32_t, int64_t> >,
// Two Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::INNER, HASH, VTuple<double , uint32_t, int64_t> >,
// Four column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<float, uint32_t, double, int32_t> >,
// Four column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<double, float, int64_t, double> >,
// Five column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t, int32_t> >,
// Five column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t, float> >
> Implementations;
TYPED_TEST_CASE(JoinTest, Implementations);
// This test is used for debugging purposes and is disabled by default.
// The input sizes are small and a large amount of debug printing is enabled.
TYPED_TEST(JoinTest, DISABLED_DebugTest)
{
this->create_input(5, 2,
5, 2,
true);
std::vector<result_type> reference_result = this->compute_reference_solution(true);
std::vector<result_type> gdf_result = this->compute_gdf_result(true);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EqualValues)
{
this->create_input(100,1,
1000,1);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, MaxRandomValues)
{
this->create_input(10000,RAND_MAX,
10000,RAND_MAX);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, LeftColumnsBigger)
{
this->create_input(10000,100,
100,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, RightColumnsBigger)
{
this->create_input(100,100,
10000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyLeftFrame)
{
this->create_input(0,100,
1000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyRightFrame)
{
this->create_input(1000,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, BothFramesEmpty)
{
this->create_input(0,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// The below tests check correct reporting of missing valid pointer
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct JoinValidTest : public JoinTest<test_parameters>
{ };
using ValidTestImplementation = testing::Types< TestParameters< join_op::INNER, SORT, VTuple<int32_t >>,
TestParameters< join_op::LEFT , SORT, VTuple<int32_t >>,
TestParameters< join_op::FULL , SORT, VTuple<int32_t >> >;
TYPED_TEST_CASE(JoinValidTest, ValidTestImplementation);
TYPED_TEST(JoinValidTest, ReportValidMaskError)
{
this->create_input(1000,100,
100,100,
false, 1);
std::vector<result_type> gdf_result = this->compute_gdf_result(false, true, GDF_VALIDITY_UNSUPPORTED);
}
// The below tests are for testing inputs that are at or above the maximum input size possible
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct MaxJoinTest : public JoinTest<test_parameters>
{ };
// Only test for single column inputs for Inner and Left joins because these tests take a long time
using MaxImplementations = testing::Types< TestParameters< join_op::INNER, HASH, VTuple<int32_t >>,
TestParameters< join_op::LEFT, HASH, VTuple<int32_t >> >;
TYPED_TEST_CASE(MaxJoinTest, MaxImplementations);
TYPED_TEST(MaxJoinTest, InputTooLarge)
{
const gdf_size_type left_table_size = 100;
const gdf_size_type right_table_size =
static_cast<gdf_size_type>(std::numeric_limits<int>::max());
this->create_dummy_input(left_table_size, right_table_size);
const bool print_result{false};
const bool sort_result{false};
// We expect the function to fail when the input is this large
const gdf_error expected_error{GDF_COLUMN_SIZE_TOO_BIG};
std::vector<result_type> gdf_result = this->compute_gdf_result(print_result,
sort_result,
expected_error);
}
// These tests will only fail on a non-release build where `assert`s are enabled
#ifndef NDEBUG
TEST(HashTableSizeDeathTest, ZeroOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{0};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
TEST(HashTableSizeDeathTest, TooLargeOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{101};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
#endif
TEST(HashTableSizeTest, OverflowTest){
int const num_insertions{std::numeric_limits<int>::max()};
uint32_t occupancy{50};
size_t hash_table_size = compute_hash_table_size(num_insertions, occupancy);
size_t expected_size{ size_t{2} * std::numeric_limits<int>::max()};
ASSERT_TRUE(hash_table_size > num_insertions);
EXPECT_EQ(expected_size, hash_table_size);
}
|
bb600f50906bd9765d46f607c1386a12de0fb0bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: mandel.c
* Author: Antonio Lechuga
*
 * Created on day 9999 of the COVID19 quarantine
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
//PP#include <hip/hip_runtime.h>
# define POINTS_PER_DIM 1024
# define MAX_ITER 2000
// Defining complex type
typedef struct complex_ {
double real;
double imag;
} complex, *Pcomplex;
// Getting new complex number
complex new_complex(double real, double imag) {
Pcomplex complex_ptr = (Pcomplex)malloc(sizeof(complex));
complex_ptr->real = real;
complex_ptr->imag = imag;
return *complex_ptr;
}
/* Utility to check for CUDA errors */
void checkCUDAError(const char*);
// Mandelbrot generation kernel
__global__ void generate_mandelbrot(complex *in, int *out, complex z, int i_size, int max_iter) {
// calculating indices
int id_r = blockIdx.x * blockDim.x + threadIdx.x;
int id_i = blockIdx.y * blockDim.y + threadIdx.y;
// initial values
complex c = in[id_i * i_size + id_r];
int result = 1;
double temp_real;
double abs_value;
// determining if c is part of mandelbrot set
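// Escape-time iteration: z_{n+1} = z_n^2 + c with z_0 = 0; if |z_n| ever exceeds 2
// the orbit diverges and c is outside the set, otherwise after max_iter iterations
// c is treated as a member of the set.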
for (int i = 0; i < max_iter; i++) {
// squaring z and adding c
temp_real = z.real;
z.real = (z.real * z.real) - (z.imag * z.imag) + c.real;
z.imag = 2 * temp_real * z.imag + c.imag;
// calculating abs value
abs_value = sqrt((z.real * z.real) + (z.imag * z.imag));
if (abs_value > 2.0) {
result = 0;
break;
}
}
out[id_i * i_size + id_r] = result;
__syncthreads();
// calculating number of elements outside of mandelbrot set
if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) {
int num_inside = 0;
for (int i = 0; i < i_size * i_size; i++) {
num_inside += out[i];
}
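// The sampled domain is [-2,2] x [-2,2] (area 16), so the fraction of grid points
// that stayed bounded, times 16, estimates the area of the set; the reported error
// is taken as area / i_size, a rough resolution-based bound.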
float area = 16.0 * (double)(num_inside) / (double)(i_size * i_size);
float error = area / (double)i_size;
printf("The number of points outside is: %d\n", i_size * i_size - num_inside);
printf("Area of Mandlebrot set is: %12.8f +/- %12.8f\n", area, error);
}
}
int main(int argc, char** argv) {
// parsing input
int r_points, i_points;
if (argc < 2) {
r_points = POINTS_PER_DIM;
i_points = POINTS_PER_DIM;
} else if (argc < 3) {
r_points = 1 << atoi(argv[1]);
i_points = 1 << atoi(argv[1]);
} else {
printf("Usage: mandel-gpu <log(xdim)> <log(ydim)>\n");
exit(-1);
}
// initialization
time_t t1, t2;
double max = 2.0;
double min = -2.0;
int array_size = r_points * i_points;
// int num_outside = 0;
double dR = (max - min) / r_points;
double dI = (max - min) / i_points;
complex z;
z.real = 0.0;
z.imag = 0.0;
// calculating sizes
size_t size_input = array_size * sizeof(complex);
size_t size_output = array_size * sizeof(int);
// pointers
complex *h_input; // CPU
complex *d_input; // GPU
int *h_output; // CPU
int *d_output; // GPU
// allocating space in CPU
h_input = (complex *) malloc(size_input);
h_output = (int *) malloc(size_output);
// allocating space in GPU
hipMalloc((void **) &d_input, size_input);
hipMalloc((void **) &d_output, size_output);
// generating input
printf("Generating input...\n");
for (int i = 0; i < i_points; i++) {
for (int j = 0; j < r_points; j++) {
double real_part = min + dR * j;
double imag_part = max - dI * i;
h_input[i_points * i + j] = new_complex(real_part, imag_part);
}
}
// copying from CPU to GPU
hipMemcpy(d_input, h_input, size_input, hipMemcpyHostToDevice);
// executing kernels
t1 = time(NULL);
int n_threads = 16;
int n_blocks_r = r_points / n_threads;
int n_blocks_i = i_points / n_threads;
dim3 dimBlock(n_threads, n_threads);
dim3 dimGrid(n_blocks_r, n_blocks_i);
hipLaunchKernelGGL(( generate_mandelbrot), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_output, z, i_points, MAX_ITER);
// waiting for threads
hipDeviceSynchronize();
checkCUDAError("kernel invocation");
// timing execution
t2 = time(NULL);
printf("Execution time: %f sec\n", difftime(t2, t1));
// copying back to CPU
hipMemcpy(h_output, d_output, size_output, hipMemcpyDeviceToHost);
checkCUDAError("memcpy");
// verifying on CPU
printf("Verifying result on CPU...\n");
int temp = 0;
for (int i = 0; i < array_size; i++) {
temp += h_output[i];
}
printf("The number of points outside is: %d\n", array_size - temp);
// generating pgm image
printf("Generating image...\n");
FILE *fp;
fp = fopen("mandelbrot-fractal-gpu.pgm", "w");
fputs("P2 \n", fp);
fprintf(fp, "%d %d \n", i_points, r_points);
fputs("1 \n", fp);
for (int i = 0; i < i_points; i++) {
for (int j = 0; j < r_points; j++) {
fprintf(fp, "%d ", h_output[i * i_points + j]);
}
fputs("\n", fp);
}
fclose(fp);
// freeing memory
printf("Freeing memory...\n");
free(h_input);
free(h_output);
hipFree(d_input);
hipFree(d_output);
printf("Done!\n");
return 0;
}
// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
} | bb600f50906bd9765d46f607c1386a12de0fb0bd.cu | /*
* File: mandel.c
* Author: Antonio Lechuga
*
* Created on DÃa 9999 de la cuarentena COVID19
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
//PP#include <cuda.h>
# define POINTS_PER_DIM 1024
# define MAX_ITER 2000
// Defining complex type
typedef struct complex_ {
double real;
double imag;
} complex, *Pcomplex;
// Getting new complex number
complex new_complex(double real, double imag) {
Pcomplex complex_ptr = (Pcomplex)malloc(sizeof(complex));
complex_ptr->real = real;
complex_ptr->imag = imag;
return *complex_ptr;
}
/* Utility to check for CUDA errors */
void checkCUDAError(const char*);
// Mandelbrot generation kernel
__global__ void generate_mandelbrot(complex *in, int *out, complex z, int i_size, int max_iter) {
// calculating indices
int id_r = blockIdx.x * blockDim.x + threadIdx.x;
int id_i = blockIdx.y * blockDim.y + threadIdx.y;
// initial values
complex c = in[id_i * i_size + id_r];
int result = 1;
double temp_real;
double abs_value;
// determining if c is part of mandelbrot set
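// Escape-time iteration: z_{n+1} = z_n^2 + c with z_0 = 0; if |z_n| ever exceeds 2
// the orbit diverges and c is outside the set, otherwise after max_iter iterations
// c is treated as a member of the set.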
for (int i = 0; i < max_iter; i++) {
// squaring z and adding c
temp_real = z.real;
z.real = (z.real * z.real) - (z.imag * z.imag) + c.real;
z.imag = 2 * temp_real * z.imag + c.imag;
// calculating abs value
abs_value = sqrt((z.real * z.real) + (z.imag * z.imag));
if (abs_value > 2.0) {
result = 0;
break;
}
}
out[id_i * i_size + id_r] = result;
__syncthreads();
// calculating number of elements outside of mandelbrot set
if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) {
int num_inside = 0;
for (int i = 0; i < i_size * i_size; i++) {
num_inside += out[i];
}
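// The sampled domain is [-2,2] x [-2,2] (area 16), so the fraction of grid points
// that stayed bounded, times 16, estimates the area of the set; the reported error
// is taken as area / i_size, a rough resolution-based bound.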
float area = 16.0 * (double)(num_inside) / (double)(i_size * i_size);
float error = area / (double)i_size;
printf("The number of points outside is: %d\n", i_size * i_size - num_inside);
printf("Area of Mandlebrot set is: %12.8f +/- %12.8f\n", area, error);
}
}
int main(int argc, char** argv) {
// parsing input
int r_points, i_points;
if (argc < 2) {
r_points = POINTS_PER_DIM;
i_points = POINTS_PER_DIM;
} else if (argc < 3) {
r_points = 1 << atoi(argv[1]);
i_points = 1 << atoi(argv[1]);
} else {
printf("Usage: mandel-gpu <log(xdim)> <log(ydim)>\n");
exit(-1);
}
// initialization
time_t t1, t2;
double max = 2.0;
double min = -2.0;
int array_size = r_points * i_points;
// int num_outside = 0;
double dR = (max - min) / r_points;
double dI = (max - min) / i_points;
complex z;
z.real = 0.0;
z.imag = 0.0;
// calculating sizes
size_t size_input = array_size * sizeof(complex);
size_t size_output = array_size * sizeof(int);
// pointers
complex *h_input; // CPU
complex *d_input; // GPU
int *h_output; // CPU
int *d_output; // GPU
// allocating space in CPU
h_input = (complex *) malloc(size_input);
h_output = (int *) malloc(size_output);
// allocating space in GPU
cudaMalloc((void **) &d_input, size_input);
cudaMalloc((void **) &d_output, size_output);
// generating input
printf("Generating input...\n");
for (int i = 0; i < i_points; i++) {
for (int j = 0; j < r_points; j++) {
double real_part = min + dR * j;
double imag_part = max - dI * i;
h_input[i_points * i + j] = new_complex(real_part, imag_part);
}
}
// copying from CPU to GPU
cudaMemcpy(d_input, h_input, size_input, cudaMemcpyHostToDevice);
// executing kernels
t1 = time(NULL);
int n_threads = 16;
int n_blocks_r = r_points / n_threads;
int n_blocks_i = i_points / n_threads;
dim3 dimBlock(n_threads, n_threads);
dim3 dimGrid(n_blocks_r, n_blocks_i);
generate_mandelbrot<<<dimGrid, dimBlock>>>(d_input, d_output, z, i_points, MAX_ITER);
// waiting for threads
cudaThreadSynchronize();
checkCUDAError("kernel invocation");
// timing execution
t2 = time(NULL);
printf("Execution time: %f sec\n", difftime(t2, t1));
// copying back to CPU
cudaMemcpy(h_output, d_output, size_output, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
// verifying on CPU
printf("Verifying result on CPU...\n");
int temp = 0;
for (int i = 0; i < array_size; i++) {
temp += h_output[i];
}
printf("The number of points outside is: %d\n", array_size - temp);
// generating pgm image
printf("Generating image...\n");
FILE *fp;
fp = fopen("mandelbrot-fractal-gpu.pgm", "w");
fputs("P2 \n", fp);
fprintf(fp, "%d %d \n", i_points, r_points);
fputs("1 \n", fp);
for (int i = 0; i < i_points; i++) {
for (int j = 0; j < r_points; j++) {
fprintf(fp, "%d ", h_output[i * i_points + j]);
}
fputs("\n", fp);
}
fclose(fp);
// freeing memory
printf("Freeing memory...\n");
free(h_input);
free(h_output);
cudaFree(d_input);
cudaFree(d_output);
printf("Done!\n");
return 0;
}
// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
} |
98c4ed6e4925338b5f3d2bfa8daf4c800a521572.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "betweenness_centrality.cuh"
//TODO: Return reference
void betweenness_centrality_setup(const device_graph &g, int start, int end, std::vector< std::vector<float> > &delta_h)
{
//For now, use "standard" grid/block sizes. These can be tuned later on.
dim3 dimGrid, dimBlock;
//Returns number of source vertices to store for verification purposes
size_t sources_to_store = configure_grid(dimGrid,dimBlock,start,end);
//Device pointers
int *d_d, *Q_d, *Q2_d, *S_d, *endpoints_d;
unsigned long long *sigma_d;
float *delta_d;
pitch p;
hipEvent_t start_event, end_event;
//Allocate algorithm-specific memory
start_clock(start_event,end_event);
checkCudaErrors(hipMallocPitch((void**)&d_d,&p.d,sizeof(int)*g.n,sources_to_store));
checkCudaErrors(hipMallocPitch((void**)&sigma_d,&p.sigma,sizeof(unsigned long long)*g.n,sources_to_store));
checkCudaErrors(hipMallocPitch((void**)&delta_d,&p.delta,sizeof(float)*g.n,sources_to_store));
checkCudaErrors(hipMallocPitch((void**)&Q_d,&p.Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&Q2_d,&p.Q2,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&S_d,&p.S,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&endpoints_d,&p.endpoints,sizeof(int)*g.n,dimGrid.x));
thrust::device_vector<float> bc_d(g.n,0);
thrust::device_vector<int> dep_accum(1,0); //Signify which dependency accumulation method was chosen
//Memory specific to Load-Balancing Search
//int *edge_counts_d, *scanned_edges_d, *LBS_d;
//thrust::device_vector<int> edge_frontier_size_d(dimGrid.x,0);
//checkCudaErrors(hipMallocPitch((void**)&edge_counts_d,&p.edge_counts,sizeof(int)*g.n,dimGrid.x));
//checkCudaErrors(hipMallocPitch((void**)&scanned_edges_d,&p.scanned_edges,sizeof(int)*g.n,dimGrid.x));
//checkCudaErrors(hipMallocPitch((void**)&LBS_d,&p.LBS,sizeof(int)*g.m,dimGrid.x));
size_t GPU_memory_requirement = sizeof(int)*g.n*sources_to_store + 4*sizeof(int)*g.n*dimGrid.x + sizeof(int)*(g.n+1) + sizeof(int)*(g.m) + sizeof(unsigned long long)*g.n*sources_to_store + sizeof(float)*g.n*sources_to_store + sizeof(float)*g.n; //+ sizeof(int)*sources_to_store + 2*sizeof(int)*g.n*sources_to_store + sizeof(int)*g.m*sources_to_store;
std::cout << "BC memory requirement: " << GPU_memory_requirement/(1 << 20) << " MB" << std::endl;
//shuffle-based dep: 896; LBS: 512
hipLaunchKernelGGL(( betweenness_centrality), dim3(dimGrid),dim3(dimBlock), 0, 0, thrust::raw_pointer_cast(g.R.data()),thrust::raw_pointer_cast(g.C.data()),thrust::raw_pointer_cast(g.F.data()),g.n,g.m,d_d,sigma_d,delta_d,thrust::raw_pointer_cast(bc_d.data()),Q_d,Q2_d,S_d,endpoints_d,p,start,end,thrust::raw_pointer_cast(dep_accum.data()));//thrust::raw_pointer_cast(edge_frontier_size_d.data()),edge_counts_d,scanned_edges_d,LBS_d,p,start,end);
checkCudaErrors(hipPeekAtLastError());
//std::vector< std::vector<float> > delta_h;
transfer_result(g,delta_d,p.delta,sources_to_store,delta_h);
//Free algorithm-specific memory
/*checkCudaErrors(hipFree(LBS_d));
checkCudaErrors(hipFree(scanned_edges_d));
checkCudaErrors(hipFree(edge_counts_d));*/
checkCudaErrors(hipFree(endpoints_d));
checkCudaErrors(hipFree(S_d));
checkCudaErrors(hipFree(Q2_d));
checkCudaErrors(hipFree(Q_d));
checkCudaErrors(hipFree(delta_d));
checkCudaErrors(hipFree(sigma_d));
checkCudaErrors(hipFree(d_d));
float time = end_clock(start_event,end_event);
int diameter_est = dep_accum[0];
std::cout << "Estimated diameter: " << diameter_est << std::endl;
std::cout << "Threshold: " << 4*std::log2(g.n) << " (";
if(diameter_est < 4*std::log2(g.n))
{
std::cout << "Edge-parallel dependency accumulation chosen)" << std::endl;
}
else
{
std::cout << "Work-efficient dependency accumulation chosen)" << std::endl;
}
std::cout << "Time for shuffle-based BC: " << std::setprecision(9) << time << " s" << std::endl;
}
| 98c4ed6e4925338b5f3d2bfa8daf4c800a521572.cu | #include "betweenness_centrality.cuh"
//TODO: Return reference
void betweenness_centrality_setup(const device_graph &g, int start, int end, std::vector< std::vector<float> > &delta_h)
{
//For now, use "standard" grid/block sizes. These can be tuned later on.
dim3 dimGrid, dimBlock;
//Returns number of source vertices to store for verification purposes
size_t sources_to_store = configure_grid(dimGrid,dimBlock,start,end);
//Device pointers
int *d_d, *Q_d, *Q2_d, *S_d, *endpoints_d;
unsigned long long *sigma_d;
float *delta_d;
pitch p;
cudaEvent_t start_event, end_event;
//Allocate algorithm-specific memory
start_clock(start_event,end_event);
checkCudaErrors(cudaMallocPitch((void**)&d_d,&p.d,sizeof(int)*g.n,sources_to_store));
checkCudaErrors(cudaMallocPitch((void**)&sigma_d,&p.sigma,sizeof(unsigned long long)*g.n,sources_to_store));
checkCudaErrors(cudaMallocPitch((void**)&delta_d,&p.delta,sizeof(float)*g.n,sources_to_store));
checkCudaErrors(cudaMallocPitch((void**)&Q_d,&p.Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&Q2_d,&p.Q2,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&S_d,&p.S,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&endpoints_d,&p.endpoints,sizeof(int)*g.n,dimGrid.x));
thrust::device_vector<float> bc_d(g.n,0);
thrust::device_vector<int> dep_accum(1,0); //Signify which dependency accumulation method was chosen
//Memory specific to Load-Balancing Search
//int *edge_counts_d, *scanned_edges_d, *LBS_d;
//thrust::device_vector<int> edge_frontier_size_d(dimGrid.x,0);
//checkCudaErrors(cudaMallocPitch((void**)&edge_counts_d,&p.edge_counts,sizeof(int)*g.n,dimGrid.x));
//checkCudaErrors(cudaMallocPitch((void**)&scanned_edges_d,&p.scanned_edges,sizeof(int)*g.n,dimGrid.x));
//checkCudaErrors(cudaMallocPitch((void**)&LBS_d,&p.LBS,sizeof(int)*g.m,dimGrid.x));
size_t GPU_memory_requirement = sizeof(int)*g.n*sources_to_store + 4*sizeof(int)*g.n*dimGrid.x + sizeof(int)*(g.n+1) + sizeof(int)*(g.m) + sizeof(unsigned long long)*g.n*sources_to_store + sizeof(float)*g.n*sources_to_store + sizeof(float)*g.n; //+ sizeof(int)*sources_to_store + 2*sizeof(int)*g.n*sources_to_store + sizeof(int)*g.m*sources_to_store;
std::cout << "BC memory requirement: " << GPU_memory_requirement/(1 << 20) << " MB" << std::endl;
//shuffle-based dep: 896; LBS: 512
betweenness_centrality<<<dimGrid,dimBlock>>>(thrust::raw_pointer_cast(g.R.data()),thrust::raw_pointer_cast(g.C.data()),thrust::raw_pointer_cast(g.F.data()),g.n,g.m,d_d,sigma_d,delta_d,thrust::raw_pointer_cast(bc_d.data()),Q_d,Q2_d,S_d,endpoints_d,p,start,end,thrust::raw_pointer_cast(dep_accum.data()));//thrust::raw_pointer_cast(edge_frontier_size_d.data()),edge_counts_d,scanned_edges_d,LBS_d,p,start,end);
checkCudaErrors(cudaPeekAtLastError());
//std::vector< std::vector<float> > delta_h;
transfer_result(g,delta_d,p.delta,sources_to_store,delta_h);
//Free algorithm-specific memory
/*checkCudaErrors(cudaFree(LBS_d));
checkCudaErrors(cudaFree(scanned_edges_d));
checkCudaErrors(cudaFree(edge_counts_d));*/
checkCudaErrors(cudaFree(endpoints_d));
checkCudaErrors(cudaFree(S_d));
checkCudaErrors(cudaFree(Q2_d));
checkCudaErrors(cudaFree(Q_d));
checkCudaErrors(cudaFree(delta_d));
checkCudaErrors(cudaFree(sigma_d));
checkCudaErrors(cudaFree(d_d));
float time = end_clock(start_event,end_event);
int diameter_est = dep_accum[0];
std::cout << "Estimated diameter: " << diameter_est << std::endl;
std::cout << "Threshold: " << 4*std::log2(g.n) << " (";
if(diameter_est < 4*std::log2(g.n))
{
std::cout << "Edge-parallel dependency accumulation chosen)" << std::endl;
}
else
{
std::cout << "Work-efficient dependency accumulation chosen)" << std::endl;
}
std::cout << "Time for shuffle-based BC: " << std::setprecision(9) << time << " s" << std::endl;
}
|
6aec228e46401621ed240c5a5ba78579d594d443.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_mix16_1(int32_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t m2, int32_t mixbits, int32_t mixres)
{
int z = threadIdx.x + blockIdx.x * blockDim.x;
if (z < numSamples){
int32_t temp = ip[z];
int32_t l, r;
l = (int16_t)temp;
r = temp >> 16;
u[z] = (mixres * l + m2 * r) >> mixbits;
v[z] = l - r;
}
} | 6aec228e46401621ed240c5a5ba78579d594d443.cu | #include "includes.h"
__global__ void gpu_mix16_1(int32_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t m2, int32_t mixbits, int32_t mixres)
{
int z = threadIdx.x + blockIdx.x * blockDim.x;
if (z < numSamples){
int32_t temp = ip[z];
int32_t l, r;
l = (int16_t)temp;
r = temp >> 16;
u[z] = (mixres * l + m2 * r) >> mixbits;
v[z] = l - r;
}
} |
b51f6368c171565e38076091ddc8c9763c20e5a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
// function for checking the CUDA runtime API results.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
__global__ void fmad_kernel(double x, double y, double *out)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
*out = x * x + y;
}
}
double host_fmad_kernel(double x, double y)
{
return x * x + y;
}
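// Note: by default the device compiler is allowed to contract x * x + y into a single
// fused multiply-add (one rounding step), while the host above performs a separate
// multiply and add (two rounding steps), so the two results may differ slightly.
// If bit-identical results are required, contraction can typically be disabled
// (e.g. nvcc --fmad=false, or -ffp-contract=off for clang/gcc-style compilers);
// treat the exact flag names as compiler-dependent.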
int main(int argc, char **argv)
{
double *d_out, h_out;
double x = 2.891903;
double y = -3.980364;
double host_value = host_fmad_kernel(x, y);
checkCuda(hipMalloc((void **)&d_out, sizeof(double)));
fmad_kernel << <1, 32 >> >(x, y, d_out);
checkCuda(hipMemcpy(&h_out, d_out, sizeof(double),
hipMemcpyDeviceToHost));
if (host_value == h_out)
{
printf_s("The device output the same value as the host.\n");
}
else
{
printf_s("The device output a different value than the host, diff=%e.\n", abs(host_value - h_out));
}
return 0;
}
| b51f6368c171565e38076091ddc8c9763c20e5a4.cu | #include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
// function for checking the CUDA runtime API results.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
__global__ void fmad_kernel(double x, double y, double *out)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
*out = x * x + y;
}
}
double host_fmad_kernel(double x, double y)
{
return x * x + y;
}
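// Note: by default the device compiler is allowed to contract x * x + y into a single
// fused multiply-add (one rounding step), while the host above performs a separate
// multiply and add (two rounding steps), so the two results may differ slightly.
// If bit-identical results are required, contraction can typically be disabled
// (e.g. nvcc --fmad=false, or -ffp-contract=off for clang/gcc-style compilers);
// treat the exact flag names as compiler-dependent.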
int main(int argc, char **argv)
{
double *d_out, h_out;
double x = 2.891903;
double y = -3.980364;
double host_value = host_fmad_kernel(x, y);
checkCuda(cudaMalloc((void **)&d_out, sizeof(double)));
fmad_kernel << <1, 32 >> >(x, y, d_out);
checkCuda(cudaMemcpy(&h_out, d_out, sizeof(double),
cudaMemcpyDeviceToHost));
if (host_value == h_out)
{
printf_s("The device output the same value as the host.\n");
}
else
{
printf_s("The device output a different value than the host, diff=%e.\n", abs(host_value - h_out));
}
return 0;
}
|
996d8be43a4199b15f27bd04512dd8556bf0bf18.hip | // !!! This is a file automatically generated by hipify!!!
#include "base.h"
#include <math.h>
#include <hip/hip_runtime.h>
typedef float (*PF_HANDLE)(float, const void*);
static __device__ float
cu_sigmoid(float x) {
static float overflow = 20.0;
if (x > overflow) x = overflow;
if (x < -overflow) x = -overflow;
return 1.0 / (1.0 + exp(-x));
}
static __device__ float
cu_z(float val, const void *params) {
return cu_sigmoid(val) - ((float*)params)[blockIdx.x];
}
__device__ PF_HANDLE pf_handles[] = {NULL, cu_z};
static __device__ void
reduce(float val, unsigned short pf_idx, const void *params, float *out) {
extern __shared__ float sums[];
unsigned int s;
if (blockDim.x > 32) {
s = 16;
}
else {
s = blockDim.x >> 1;
}
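// Warp-level reduction: each __shfl_down_sync step adds the value held by the lane
// s positions higher, halving s until lane 0 of every warp holds the sum of that
// warp's (up to 32) values; the per-warp sums are then combined via shared memory below.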
for (; s > 0; s >>= 1) {
val += __shfl_down_sync(0xFFFFFFFF, val, s);
}
if (0 == (threadIdx.x & 0x1f)) {
unsigned int offset = blockIdx.x * WARP_COUNT;
sums[offset + (threadIdx.x >> 5)] = val;
__syncthreads();
if (0 == threadIdx.x) {
val = 0;
for (s = 0; s < WARP_COUNT; ++s) {
val += sums[offset + s];
}
if (1 == gridDim.y && pf_idx > 0) {
val = pf_handles[pf_idx](val, params);
}
out[blockIdx.x * gridDim.y + blockIdx.y] = val;
}
}
}
static __global__ void
reduce_weighted_sum(const float *in, const float *weights, unsigned short pf_idx, const void *params, float *out, unsigned int count) {
float val = 0;
unsigned int idx = (blockIdx.y << 1) * blockDim.x + threadIdx.x;
if (idx < count) {
unsigned int offset = blockIdx.x * count;
val = in[offset + idx] * weights[idx];
idx += blockDim.x;
if (idx < count) {
val += in[offset + idx] * weights[idx];
}
}
reduce(val, pf_idx, params, out);
}
static __global__ void
reduce_sum(const float *in, unsigned short pf_idx, const void *params, float *out, unsigned int count) {
float val = 0;
unsigned int idx = (blockIdx.y << 1) * blockDim.x + threadIdx.x;
if (idx < count) {
unsigned int offset = blockIdx.x * count;
val = in[offset + idx];
idx += blockDim.x;
if (idx < count) {
val += in[offset + idx];
}
}
reduce(val, pf_idx, params, out);
}
static __global__ void
cu_delta_weights(const float *in, const float *data, float *delta_weights, unsigned int task_batch, unsigned int feature_size, unsigned int data_size, float gama) {
unsigned int feature_idx = blockIdx.y * blockDim.x + threadIdx.x;
if (feature_idx >= feature_size) {
return;
}
float val = 0;
unsigned int data_idx = blockIdx.x * task_batch;
unsigned int i;
for (i = 0; i < task_batch; ++i) {
if (data_idx >= data_size) {
break;
}
val += in[data_idx] * data[data_idx * feature_size + feature_idx];
++data_idx;
}
val /= i;
atomicAdd(&delta_weights[feature_idx], gama * val);
}
static __global__ void
cu_adjust_weights(float *weights, float *delta_weights, unsigned int batch_count, unsigned int feature_size, float *norm) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= feature_size) {
return;
}
float val = delta_weights[idx] / batch_count;
weights[idx] -= val;
atomicAdd(norm, val * val);
}
extern "C" {
float
gpu_task(TASK_ARG *parg) {
dim3 block(BLOCK_SIZE), grid;
unsigned short pf_idx = 1;
unsigned int rec_count;
for (size_t i = 0; i < parg->parg_train->data_size; i += rec_count) {
rec_count = parg->parg_train->data_size - i;
if (rec_count > parg->max_rec_count) {
rec_count = parg->max_rec_count;
}
float *d_in = &parg->d_data[i * parg->parg_train->feature_size];
float *d_labels = &parg->d_labels[i];
unsigned int count = parg->parg_train->feature_size;
size_t shared_size = sizeof(float) * WARP_COUNT * rec_count;
grid.x = rec_count;
unsigned int block_count = (parg->block_count + 1) >> 1;
if (1 == block_count) {
grid.y = block_count;
hipLaunchKernelGGL(( reduce_weighted_sum), dim3(grid), dim3(block), shared_size, 0, d_in, parg->d_weights, pf_idx, d_labels, &parg->d_out[i], count);
continue;
}
bool b_first = true;
float *d_tmp = parg->d_tmp;
while (count > 1) {
grid.y = block_count;
if (1 == block_count) {
hipLaunchKernelGGL(( reduce_sum), dim3(grid), dim3(block), shared_size, 0, d_in, pf_idx, d_labels, &parg->d_out[i], count);
count = 1;
}
else {
if (b_first) {
hipLaunchKernelGGL(( reduce_weighted_sum), dim3(grid), dim3(block), shared_size, 0, d_in, parg->d_weights, pf_idx, d_labels, d_tmp, count);
b_first = false;
}
else {
hipLaunchKernelGGL(( reduce_sum), dim3(grid), dim3(block), shared_size, 0, d_in, pf_idx, d_labels, d_tmp, count);
}
count = block_count;
block_count = (block_count + BLOCK_SIZE - 1) / BLOCK_SIZE;
block_count = (block_count + 1) >> 1;
d_in = d_tmp;
if (parg->d_tmp == d_in) {
d_tmp = &d_in[count * rec_count];
}
else {
d_tmp = parg->d_tmp;
}
}
}
}
unsigned int batch_count = (parg->parg_train->data_size + parg->task_batch - 1) / parg->task_batch;
grid.x = batch_count;
grid.y = parg->block_count;
hipMemset(parg->d_delta_weights, 0, sizeof(float) * parg->parg_train->feature_size);
hipLaunchKernelGGL(( cu_delta_weights), dim3(grid), dim3(block), 0, 0, parg->d_out, parg->d_data, parg->d_delta_weights, parg->task_batch, parg->parg_train->feature_size, parg->parg_train->data_size, parg->parg_train->gama);
grid.x = parg->block_count;
grid.y = 1;
hipMemset(parg->d_norm, 0, sizeof(float));
hipLaunchKernelGGL(( cu_adjust_weights), dim3(grid), dim3(block), 0, 0, parg->d_weights, parg->d_delta_weights, batch_count, parg->parg_train->feature_size, parg->d_norm);
float norm;
hipMemcpy(&norm, parg->d_norm, sizeof(float), hipMemcpyDeviceToHost);
return sqrt(norm);
}
}
| 996d8be43a4199b15f27bd04512dd8556bf0bf18.cu | #include "base.h"
#include <math.h>
#include <cuda_runtime.h>
typedef float (*PF_HANDLE)(float, const void*);
static __device__ float
cu_sigmoid(float x) {
static float overflow = 20.0;
if (x > overflow) x = overflow;
if (x < -overflow) x = -overflow;
return 1.0 / (1.0 + exp(-x));
}
static __device__ float
cu_z(float val, const void *params) {
return cu_sigmoid(val) - ((float*)params)[blockIdx.x];
}
__device__ PF_HANDLE pf_handles[] = {NULL, cu_z};
static __device__ void
reduce(float val, unsigned short pf_idx, const void *params, float *out) {
extern __shared__ float sums[];
unsigned int s;
if (blockDim.x > 32) {
s = 16;
}
else {
s = blockDim.x >> 1;
}
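// Warp-level reduction: each __shfl_down_sync step adds the value held by the lane
// s positions higher, halving s until lane 0 of every warp holds the sum of that
// warp's (up to 32) values; the per-warp sums are then combined via shared memory below.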
for (; s > 0; s >>= 1) {
val += __shfl_down_sync(0xFFFFFFFF, val, s);
}
if (0 == (threadIdx.x & 0x1f)) {
unsigned int offset = blockIdx.x * WARP_COUNT;
sums[offset + (threadIdx.x >> 5)] = val;
__syncthreads();
if (0 == threadIdx.x) {
val = 0;
for (s = 0; s < WARP_COUNT; ++s) {
val += sums[offset + s];
}
if (1 == gridDim.y && pf_idx > 0) {
val = pf_handles[pf_idx](val, params);
}
out[blockIdx.x * gridDim.y + blockIdx.y] = val;
}
}
}
static __global__ void
reduce_weighted_sum(const float *in, const float *weights, unsigned short pf_idx, const void *params, float *out, unsigned int count) {
float val = 0;
unsigned int idx = (blockIdx.y << 1) * blockDim.x + threadIdx.x;
if (idx < count) {
unsigned int offset = blockIdx.x * count;
val = in[offset + idx] * weights[idx];
idx += blockDim.x;
if (idx < count) {
val += in[offset + idx] * weights[idx];
}
}
reduce(val, pf_idx, params, out);
}
static __global__ void
reduce_sum(const float *in, unsigned short pf_idx, const void *params, float *out, unsigned int count) {
float val = 0;
unsigned int idx = (blockIdx.y << 1) * blockDim.x + threadIdx.x;
if (idx < count) {
unsigned int offset = blockIdx.x * count;
val = in[offset + idx];
idx += blockDim.x;
if (idx < count) {
val += in[offset + idx];
}
}
reduce(val, pf_idx, params, out);
}
static __global__ void
cu_delta_weights(const float *in, const float *data, float *delta_weights, unsigned int task_batch, unsigned int feature_size, unsigned int data_size, float gama) {
unsigned int feature_idx = blockIdx.y * blockDim.x + threadIdx.x;
if (feature_idx >= feature_size) {
return;
}
float val = 0;
unsigned int data_idx = blockIdx.x * task_batch;
unsigned int i;
for (i = 0; i < task_batch; ++i) {
if (data_idx >= data_size) {
break;
}
val += in[data_idx] * data[data_idx * feature_size + feature_idx];
++data_idx;
}
val /= i;
atomicAdd(&delta_weights[feature_idx], gama * val);
}
static __global__ void
cu_adjust_weights(float *weights, float *delta_weights, unsigned int batch_count, unsigned int feature_size, float *norm) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= feature_size) {
return;
}
float val = delta_weights[idx] / batch_count;
weights[idx] -= val;
atomicAdd(norm, val * val);
}
extern "C" {
float
gpu_task(TASK_ARG *parg) {
dim3 block(BLOCK_SIZE), grid;
unsigned short pf_idx = 1;
unsigned int rec_count;
for (size_t i = 0; i < parg->parg_train->data_size; i += rec_count) {
rec_count = parg->parg_train->data_size - i;
if (rec_count > parg->max_rec_count) {
rec_count = parg->max_rec_count;
}
float *d_in = &parg->d_data[i * parg->parg_train->feature_size];
float *d_labels = &parg->d_labels[i];
unsigned int count = parg->parg_train->feature_size;
size_t shared_size = sizeof(float) * WARP_COUNT * rec_count;
grid.x = rec_count;
unsigned int block_count = (parg->block_count + 1) >> 1;
if (1 == block_count) {
grid.y = block_count;
reduce_weighted_sum<<<grid, block, shared_size>>>(d_in, parg->d_weights, pf_idx, d_labels, &parg->d_out[i], count);
continue;
}
bool b_first = true;
float *d_tmp = parg->d_tmp;
while (count > 1) {
grid.y = block_count;
if (1 == block_count) {
reduce_sum<<<grid, block, shared_size>>>(d_in, pf_idx, d_labels, &parg->d_out[i], count);
count = 1;
}
else {
if (b_first) {
reduce_weighted_sum<<<grid, block, shared_size>>>(d_in, parg->d_weights, pf_idx, d_labels, d_tmp, count);
b_first = false;
}
else {
reduce_sum<<<grid, block, shared_size>>>(d_in, pf_idx, d_labels, d_tmp, count);
}
count = block_count;
block_count = (block_count + BLOCK_SIZE - 1) / BLOCK_SIZE;
block_count = (block_count + 1) >> 1;
d_in = d_tmp;
if (parg->d_tmp == d_in) {
d_tmp = &d_in[count * rec_count];
}
else {
d_tmp = parg->d_tmp;
}
}
}
}
unsigned int batch_count = (parg->parg_train->data_size + parg->task_batch - 1) / parg->task_batch;
grid.x = batch_count;
grid.y = parg->block_count;
cudaMemset(parg->d_delta_weights, 0, sizeof(float) * parg->parg_train->feature_size);
cu_delta_weights<<<grid, block>>>(parg->d_out, parg->d_data, parg->d_delta_weights, parg->task_batch, parg->parg_train->feature_size, parg->parg_train->data_size, parg->parg_train->gama);
grid.x = parg->block_count;
grid.y = 1;
cudaMemset(parg->d_norm, 0, sizeof(float));
cu_adjust_weights<<<grid, block>>>(parg->d_weights, parg->d_delta_weights, batch_count, parg->parg_train->feature_size, parg->d_norm);
float norm;
cudaMemcpy(&norm, parg->d_norm, sizeof(float), cudaMemcpyDeviceToHost);
return sqrt(norm);
}
}
|
86b324f947ebcf626b2ccfb8d05235e7cca2c7c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define THREADS_PER_BLOCK 512
#define N (2048*2048)
__global__ void dot(int *a, int *b, int *c){
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
__syncthreads();
// Thread 0 of each block sums the pairwise products stored in shared memory
// and adds the block's partial sum to the global result with atomicAdd
if ( 0 == threadIdx.x) {
int sum = 0;
for( int i =0; i < THREADS_PER_BLOCK ; i++)
sum +=temp[i];
atomicAdd(c, sum);
}
}
int main( void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**) &dev_c, sizeof(int));
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(sizeof(int));
for (int i= 0; i < N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
hipMemcpy(dev_a, a, size , hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
hipLaunchKernelGGL(( dot), dim3(N / THREADS_PER_BLOCK) , dim3(THREADS_PER_BLOCK) , 0, 0, dev_a,dev_b,dev_c);
//copy device result back to host copy of c
hipMemcpy(c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("The sum is : %d\n", *c);
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 86b324f947ebcf626b2ccfb8d05235e7cca2c7c4.cu | #include "stdio.h"
#define THREADS_PER_BLOCK 512
#define N (2048*2048)
__global__ void dot(int *a, int *b, int *c){
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
__syncthreads();
// Thread 0 of each block sums the pairwise products stored in shared memory
// and adds the block's partial sum to the global result with atomicAdd
if ( 0 == threadIdx.x) {
int sum = 0;
for( int i =0; i < THREADS_PER_BLOCK ; i++)
sum +=temp[i];
atomicAdd(c, sum);
}
}
int main( void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, sizeof(int));
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(sizeof(int));
for (int i= 0; i < N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
cudaMemcpy(dev_a, a, size , cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
dot<<< N / THREADS_PER_BLOCK , THREADS_PER_BLOCK >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
cudaMemcpy(c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("The sum is : %d\n", *c);
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
b603db013e9139083a43c1dc26040df5d697184c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC Summer Institute 2017
// Andreas Goetz ([email protected])
// CUDA program that performs 1D stencil operation in parallel on the GPU
//
// /* FIXME */ COMMENTS REQUIRE ATTENTION
#include<stdio.h>
// define vector length, stencil radius,
#define N (1024*1024*8l)
#define RADIUS 3
#define GRIDSIZE 128
#define BLOCKSIZE 256
// -------------------------------------------------------
// CUDA device function that performs 1D stencil operation
// -------------------------------------------------------
__global__ void stencil_1D(int *in, int *out, long dim){
long gindex = threadIdx.x + blockDim.x * blockIdx.x;
int stride = gridDim.x * blockDim.x;
// Go through all data
// The loop bound is padded by blockDim.x so that every thread of a block performs
// the same number of iterations, which keeps the __syncthreads() below safe
while ( gindex < (dim + blockDim.x) ) {
/* FIXME - CAN WE USE SHARED MEMORY? */
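/* One possible answer (sketch only, not enabled here): declare
 *   __shared__ int tmp[BLOCKSIZE + 2 * RADIUS];
 * have each thread copy in[gindex] into tmp[threadIdx.x + RADIUS], let the first
 * RADIUS threads also load the left/right halo cells (with bounds checks against
 * dim), call __syncthreads(), and then apply the stencil by reading neighbours
 * from tmp instead of from global memory. */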
// Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
if ( gindex + offset < dim && gindex + offset > -1)
result += in[gindex + offset];
}
// Store the result
if (gindex < dim)
out[gindex] = result;
// Update global index and quit if we are done
gindex += stride;
__syncthreads();
}
}
// ------------
// main program
// ------------
int main(void){
int *h_in, *h_out;
int *d_in, *d_out;
long size = N * sizeof(int);
int i, j, ij, result, err;
// allocate host memory
h_in = new int[N];
h_out = new int[N];
// initialize vector
for (i=0; i<N; i++){
// h_in[i] = i+1;
h_in[i] = 1;
}
// allocate device memory
hipMalloc((void **)&d_in, size);
hipMalloc((void **)&d_out, size);
// copy input data to device
hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
// Apply stencil by launching a sufficient number of blocks
printf("\n---------------------------\n");
printf("Launching 1D stencil kernel\n");
printf("---------------------------\n");
printf("Vector length = %ld (%ld MB)\n",N,N*4/1024/1024);
printf("Stencil radius = %d\n",RADIUS);
printf("Blocks = %d\n",GRIDSIZE);
printf("Threads per block = %d\n",BLOCKSIZE);
printf("Total threads = %d\n",GRIDSIZE*BLOCKSIZE);
hipLaunchKernelGGL(( stencil_1D), dim3(GRIDSIZE),dim3(BLOCKSIZE), 0, 0, d_in, d_out, N);
// copy results back to host
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
// deallocate device memory
hipFree(d_in);
hipFree(d_out);
// check results
err = 0;
for (i=0; i<N; i++){
result = 0;
for (j=-RADIUS; j<=RADIUS; j++){
ij = i+j;
if (ij>=0 && ij<N)
result += h_in[ij];
}
if (h_out[i] != result) {
err++;
// printf("h_out[%d]=%d\n",i,h_out[i]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match CPU result.\n\n");
}
// deallocate host memory
delete[] h_in; // h_in/h_out were allocated with new[], so release them with delete[]
delete[] h_out;
return 0;
}
| b603db013e9139083a43c1dc26040df5d697184c.cu | // SDSC Summer Institute 2017
// Andreas Goetz ([email protected])
// CUDA program that performs 1D stencil operation in parallel on the GPU
//
// /* FIXME */ COMMENTS REQUIRE ATTENTION
#include<stdio.h>
// define vector length, stencil radius,
#define N (1024*1024*8l)
#define RADIUS 3
#define GRIDSIZE 128
#define BLOCKSIZE 256
// -------------------------------------------------------
// CUDA device function that performs 1D stencil operation
// -------------------------------------------------------
__global__ void stencil_1D(int *in, int *out, long dim){
long gindex = threadIdx.x + blockDim.x * blockIdx.x;
int stride = gridDim.x * blockDim.x;
// Go through all data
// The loop bound is padded by blockDim.x so that every thread of a block performs
// the same number of iterations, which keeps the __syncthreads() below safe
while ( gindex < (dim + blockDim.x) ) {
/* FIXME - CAN WE USE SHARED MEMORY? */
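/* One possible answer (sketch only, not enabled here): declare
 *   __shared__ int tmp[BLOCKSIZE + 2 * RADIUS];
 * have each thread copy in[gindex] into tmp[threadIdx.x + RADIUS], let the first
 * RADIUS threads also load the left/right halo cells (with bounds checks against
 * dim), call __syncthreads(), and then apply the stencil by reading neighbours
 * from tmp instead of from global memory. */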
// Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
if ( gindex + offset < dim && gindex + offset > -1)
result += in[gindex + offset];
}
// Store the result
if (gindex < dim)
out[gindex] = result;
// Update global index and quit if we are done
gindex += stride;
__syncthreads();
}
}
// ------------
// main program
// ------------
int main(void){
int *h_in, *h_out;
int *d_in, *d_out;
long size = N * sizeof(int);
int i, j, ij, result, err;
// allocate host memory
h_in = new int[N];
h_out = new int[N];
// initialize vector
for (i=0; i<N; i++){
// h_in[i] = i+1;
h_in[i] = 1;
}
// allocate device memory
cudaMalloc((void **)&d_in, size);
cudaMalloc((void **)&d_out, size);
// copy input data to device
cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
// Apply stencil by launching a sufficient number of blocks
printf("\n---------------------------\n");
printf("Launching 1D stencil kernel\n");
printf("---------------------------\n");
printf("Vector length = %ld (%ld MB)\n",N,N*4/1024/1024);
printf("Stencil radius = %d\n",RADIUS);
printf("Blocks = %d\n",GRIDSIZE);
printf("Threads per block = %d\n",BLOCKSIZE);
printf("Total threads = %d\n",GRIDSIZE*BLOCKSIZE);
stencil_1D<<<GRIDSIZE,BLOCKSIZE>>>(d_in, d_out, N);
// copy results back to host
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
// deallocate device memory
cudaFree(d_in);
cudaFree(d_out);
// check results
err = 0;
for (i=0; i<N; i++){
result = 0;
for (j=-RADIUS; j<=RADIUS; j++){
ij = i+j;
if (ij>=0 && ij<N)
result += h_in[ij];
}
if (h_out[i] != result) {
err++;
// printf("h_out[%d]=%d\n",i,h_out[i]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match CPU result.\n\n");
}
// deallocate host memory
delete[] h_in; // h_in/h_out were allocated with new[], so release them with delete[]
delete[] h_out;
return 0;
}
|
9a1e4e892abacdd31a94ef405546f54bfc0cf20d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/fused_worker.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <utils/markers.h>
#include "bfs_common.h"
DECLARE_int32(source_node);
DEFINE_bool(exitonerror, false, "exit on error");
namespace bfs {
namespace opt {
const level_t INF = UINT_MAX;
struct LevelData
{
index_t node;
level_t level;
__device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { }
__device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { }
};
typedef index_t local_work_t;
typedef LevelData remote_work_t;
__global__ void BFSInit(level_t* levels, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
levels[tid] = INF;
}
}
template<
typename TGraph,
typename TGraphDatum>
struct BFSWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<level_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = levels_datum.get_item(node) + 1;
}
groute::dev::CTAWorkScheduler<level_t>::template schedule(
np_local,
[&graph, &levels_datum, &rwl_in, &rwl_out](index_t edge, level_t next_level)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
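// Warp-aggregated enqueue: lanes vote on whether their destination is locally owned, each group
// elects a leader, and a lane's offset within the group's worklist insertion is the popcount of
// the lower lanes in its mask.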
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
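// Scalar variant of the BFS expansion step: each thread walks its node's edge list on its own.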
template<
typename TGraph,
typename TGraphDatum>
struct BFSWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
level_t next_level = levels_datum.get_item(node) + 1;
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
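// SplitOps routes work items for the distributed worklist: locally owned nodes are taken (or
// filtered when their level is stale), remote nodes are passed on, and pack/unpack convert
// between local node ids and (node, level) messages.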
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<level_t> m_levels_datum;
public:
template<typename...UnusedData>
SplitOps(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data)
: m_graph_seg(graph_seg), m_levels_datum(levels_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const level_t& global_prio)
{
return m_levels_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return LevelData(work, m_levels_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<typename TGraph, typename TGraphDatum>
struct FusedProblem
{
TGraph m_graph;
TGraphDatum m_levels_datum;
typedef BFSWork<TGraph, TGraphDatum> WorkType;
typedef BFSWorkNP<TGraph, TGraphDatum> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TGraphDatum& levels_datum) :
m_graph(graph), m_levels_datum(levels_datum)
{
}
// Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_levels_datum.size);
hipLaunchKernelGGL(( BFSInit) , dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
m_levels_datum.data_ptr, m_levels_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "bfs"; }
static const char* Name() { return "BFS"; }
static void Init(
groute::graphs::traversal::Context<bfs::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, bfs::opt::SplitOps>& distributed_worklist)
{
index_t source_node = std::min((index_t)std::max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// Report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData>
static std::vector<level_t> Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data)
{
graph_allocator.GatherDatum(levels_datum);
return levels_datum.GetHostData();
}
template<typename...UnusedData>
static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... data)
{
return BFSHost(graph, std::min((index_t)std::max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<level_t>& levels)
{
return BFSOutput(file, levels);
}
static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression)
{
return BFSCheckErrors(levels, regression);
}
};
}
}
bool TestBFSAsyncMultiOptimized(int ngpus)
{
typedef bfs::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
bfs::opt::Algo, ProblemType,
bfs::opt::local_work_t , bfs::opt::remote_work_t, level_t,
bfs::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
bfs::opt::Algo,
ProblemType,
SolverType,
bfs::opt::SplitOps,
bfs::opt::local_work_t,
bfs::opt::remote_work_t,
groute::graphs::multi::NodeOutputGlobalDatum<level_t> > runner;
groute::graphs::multi::NodeOutputGlobalDatum<level_t> levels_datum;
bool retval = runner(ngpus, levels_datum);
if(FLAGS_exitonerror && !retval)
exit(100);
return retval;
}
| 9a1e4e892abacdd31a94ef405546f54bfc0cf20d.cu | // Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/fused_worker.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <utils/markers.h>
#include "bfs_common.h"
DECLARE_int32(source_node);
DEFINE_bool(exitonerror, false, "exit on error");
namespace bfs {
namespace opt {
const level_t INF = UINT_MAX;
struct LevelData
{
index_t node;
level_t level;
__device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { }
__device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { }
};
typedef index_t local_work_t;
typedef LevelData remote_work_t;
__global__ void BFSInit(level_t* levels, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
levels[tid] = INF;
}
}
template<
typename TGraph,
typename TGraphDatum>
struct BFSWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<level_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = levels_datum.get_item(node) + 1;
}
groute::dev::CTAWorkScheduler<level_t>::template schedule(
np_local,
[&graph, &levels_datum, &rwl_in, &rwl_out](index_t edge, level_t next_level)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
template<
typename TGraph,
typename TGraphDatum>
struct BFSWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
level_t next_level = levels_datum.get_item(node) + 1;
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<level_t> m_levels_datum;
public:
template<typename...UnusedData>
SplitOps(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data)
: m_graph_seg(graph_seg), m_levels_datum(levels_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const level_t& global_prio)
{
return m_levels_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return LevelData(work, m_levels_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<typename TGraph, typename TGraphDatum>
struct FusedProblem
{
TGraph m_graph;
TGraphDatum m_levels_datum;
typedef BFSWork<TGraph, TGraphDatum> WorkType;
typedef BFSWorkNP<TGraph, TGraphDatum> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TGraphDatum& levels_datum) :
m_graph(graph), m_levels_datum(levels_datum)
{
}
// Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_levels_datum.size);
BFSInit <<< grid_dims, block_dims, 0, stream.cuda_stream >>>(
m_levels_datum.data_ptr, m_levels_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "bfs"; }
static const char* Name() { return "BFS"; }
static void Init(
groute::graphs::traversal::Context<bfs::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, bfs::opt::SplitOps>& distributed_worklist)
{
index_t source_node = std::min((index_t)std::max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// Report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData>
static std::vector<level_t> Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data)
{
graph_allocator.GatherDatum(levels_datum);
return levels_datum.GetHostData();
}
template<typename...UnusedData>
static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... data)
{
return BFSHost(graph, std::min((index_t)std::max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<level_t>& levels)
{
return BFSOutput(file, levels);
}
static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression)
{
return BFSCheckErrors(levels, regression);
}
};
}
}
bool TestBFSAsyncMultiOptimized(int ngpus)
{
typedef bfs::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
bfs::opt::Algo, ProblemType,
bfs::opt::local_work_t , bfs::opt::remote_work_t, level_t,
bfs::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
bfs::opt::Algo,
ProblemType,
SolverType,
bfs::opt::SplitOps,
bfs::opt::local_work_t,
bfs::opt::remote_work_t,
groute::graphs::multi::NodeOutputGlobalDatum<level_t> > runner;
groute::graphs::multi::NodeOutputGlobalDatum<level_t> levels_datum;
bool retval = runner(ngpus, levels_datum);
if(FLAGS_exitonerror && !retval)
exit(100);
return retval;
}
|
b6abb506b13199f5799634b47118435980afc83c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2_one_byte_templ.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<0, false> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 700
return ELoadType::OneElement;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TLoadEntriesTrait<0, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#elif __CUDA_ARCH__ < 700
return ELoadType::TwoElements;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<0> {
constexpr static int Inner() {
return 0;
}
constexpr static int Outer() {
return 0;
}
};
template <int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
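// Offset of this thread's shared-memory sub-histogram: 1024 floats per warp, with threads spread
// over 4 interleaved 8-float blocks (presumably to reduce shared-memory bank conflicts).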
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const bool pass = bin != 32;
int offset = f + 32 * (bin & 31);
const int offset1 = offset + flag;
const float add1 = pass ? stat1 : 0.0f;
const int offset2 = offset + !flag;
const float add2 = pass ? stat2 : 0.0f;
syncTile.sync();
Buffer[offset1] += add1;
syncTile.sync();
Buffer[offset2] += add2;
}
}
__forceinline__ __device__ void AddPoint2(uint2 ci,
const float2 t,
const float2 w) {
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const int bin1 = (ci.x >> (24 - (f << 2))) & 255;
const int bin2 = (ci.y >> (24 - (f << 2))) & 255;
const float passx = bin1 != 32 ? 1.0f : 0.0f;
const float passy = bin2 != 32 ? 1.0f : 0.0f;
int offsetx = f + 32 * (bin1 & 31) + flag;
int offsety = f + 32 * (bin2 & 31) + flag;
syncTile.sync();
Buffer[offsetx] += passx * stat1.x;
Buffer[offsety] += passy * stat1.y;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
syncTile.sync();
Buffer[offsetx] += passx * stat2.x;
Buffer[offsety] += passy * stat2.y;
}
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
//don't change anything without performance tests: nvcc is so awesome that a little change of code could slow everything down by 5-10%
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 32 ? 1.0f : 0.0f;
const float passy = biny != 32 ? 1.0f : 0.0f;
const float passz = binz != 32 ? 1.0f : 0.0f;
const float passw = binw != 32 ? 1.0f : 0.0f;
float* buffer = Buffer + f;
int offsetx = (binx & 31) << 5;
int offsety = (biny & 31) << 5;
int offsetz = (binz & 31) << 5;
int offsetw = (binw & 31) << 5;
syncTile.sync();
buffer[offsetx] += passx * stat1.x;
buffer[offsety] += passy * stat1.y;
buffer[offsetz] += passz * stat1.z;
buffer[offsetw] += passw * stat1.w;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
syncTile.sync();
buffer[offsetx] += passx * stat2.x;
buffer[offsety] += passy * stat2.y;
buffer[offsetz] += passz * stat2.z;
buffer[offsetw] += passw * stat2.w;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[(inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template <>
struct TUnrollsTrait<0, ELoadType::FourElements> {
constexpr static int Outer() {
return 1;
}
};
DEFINE_NON_BINARY(5)
}
| b6abb506b13199f5799634b47118435980afc83c.cu | #include "pointwise_hist2_one_byte_templ.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<0, false> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 700
return ELoadType::OneElement;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TLoadEntriesTrait<0, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#elif __CUDA_ARCH__ < 700
return ELoadType::TwoElements;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<0> {
constexpr static int Inner() {
return 0;
}
constexpr static int Outer() {
return 0;
}
};
template <int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const bool pass = bin != 32;
int offset = f + 32 * (bin & 31);
const int offset1 = offset + flag;
const float add1 = pass ? stat1 : 0.0f;
const int offset2 = offset + !flag;
const float add2 = pass ? stat2 : 0.0f;
syncTile.sync();
Buffer[offset1] += add1;
syncTile.sync();
Buffer[offset2] += add2;
}
}
__forceinline__ __device__ void AddPoint2(uint2 ci,
const float2 t,
const float2 w) {
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const int bin1 = (ci.x >> (24 - (f << 2))) & 255;
const int bin2 = (ci.y >> (24 - (f << 2))) & 255;
const float passx = bin1 != 32 ? 1.0f : 0.0f;
const float passy = bin2 != 32 ? 1.0f : 0.0f;
int offsetx = f + 32 * (bin1 & 31) + flag;
int offsety = f + 32 * (bin2 & 31) + flag;
syncTile.sync();
Buffer[offsetx] += passx * stat1.x;
Buffer[offsety] += passy * stat1.y;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
syncTile.sync();
Buffer[offsetx] += passx * stat2.x;
Buffer[offsety] += passy * stat2.y;
}
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
//don't change anything without performance tests: nvcc is so awesome that a little change of code could slow everything down by 5-10%
thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 32 ? 1.0f : 0.0f;
const float passy = biny != 32 ? 1.0f : 0.0f;
const float passz = binz != 32 ? 1.0f : 0.0f;
const float passw = binw != 32 ? 1.0f : 0.0f;
float* buffer = Buffer + f;
int offsetx = (binx & 31) << 5;
int offsety = (biny & 31) << 5;
int offsetz = (binz & 31) << 5;
int offsetw = (binw & 31) << 5;
syncTile.sync();
buffer[offsetx] += passx * stat1.x;
buffer[offsety] += passy * stat1.y;
buffer[offsetz] += passz * stat1.z;
buffer[offsetw] += passw * stat1.w;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
syncTile.sync();
buffer[offsetx] += passx * stat2.x;
buffer[offsety] += passy * stat2.y;
buffer[offsetz] += passz * stat2.z;
buffer[offsetw] += passw * stat2.w;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[(inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template <>
struct TUnrollsTrait<0, ELoadType::FourElements> {
constexpr static int Outer() {
return 1;
}
};
DEFINE_NON_BINARY(5)
}
|
fb353ee7b3e23e65e6ec3d711ab9e1986aaa9ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/sigmoid_multi_label_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
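// Forward kernel: expands the sparse label list into a dense multi-hot label vector, then
// accumulates the per-element sigmoid cross-entropy, scaling the negative terms by negative_scale.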
template <typename Dtype>
__global__ void SigmoidMultiLabelLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const int label_size, const int bottom_dim, Dtype negative_scale, Dtype* label_vector_data, Dtype* loss) {
int num = nthreads / bottom_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
// Set label_vector_data to zeros
int n = index / bottom_dim;
int c = index % bottom_dim;
label_vector_data[n * bottom_dim + c] = Dtype(0);
}
__syncthreads();
CUDA_KERNEL_LOOP(index, nthreads) {
if (index < num * label_size) {
int sample_id = index / label_size;
int label_id = index % label_size;
int label_value = static_cast<int>(label[sample_id * label_size + label_id]);
if (label_value > 0) {
label_vector_data[sample_id * bottom_dim + label_value - 1] = Dtype(1);
}
}
}
__syncthreads();
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index / bottom_dim;
int c = index % bottom_dim;
Dtype* cur_label_vector_data = label_vector_data + n * bottom_dim + c;
const Dtype* cur_prob_data = prob_data + n * bottom_dim + c;
loss[n * bottom_dim + c] = - cur_label_vector_data[0] * log(max(cur_prob_data[0], Dtype(kLOG_THRESHOLD))) - negative_scale * (1 - cur_label_vector_data[0]) * log(max(1 - cur_prob_data[0], Dtype(kLOG_THRESHOLD)));
}
}
template <typename Dtype>
void SigmoidMultiLabelLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
Dtype* label_vector_data = label_vector_.mutable_gpu_data();
int n = prob_.num();
int count = prob_.count();
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
hipLaunchKernelGGL(( SigmoidMultiLabelLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob_data, label, label_size_, bottom_dim_, negative_scale_, label_vector_data, loss_data);
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
top[0]->mutable_cpu_data()[0] = loss / count;
if (top.size() == 2) {
top[1]->ShareData(prob_);
} else if (top.size() == 3) {
top[1]->ShareData(prob_);
top[2]->ShareData(label_vector_);
}
}
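// Backward kernel: per element, d(loss)/d(logit) = -label * (1 - p) + negative_scale * (1 - label) * p.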
template <typename Dtype>
__global__ void SigmoidMultiLabelLossBackwardGPU(const int nthreads, const Dtype* prob,
const Dtype* label_vector_data, const int bottom_dim, Dtype negative_scale, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / bottom_dim;
const int c = index % bottom_dim;
const Dtype label_value = label_vector_data[n * bottom_dim + c];
const Dtype prob_value = prob[n * bottom_dim + c];
bottom_diff[n * bottom_dim + c] = -label_value * (1 - prob_value) + negative_scale * (1 - label_value) * prob_value;
}
}
template <typename Dtype>
void SigmoidMultiLabelLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label_vector_data = label_vector_.gpu_data();
const int num = prob_.num();
const int count = prob_.count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidMultiLabelLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob_data, label_vector_data, bottom_dim_, negative_scale_, bottom_diff);
const Dtype loss_weight = top[0]->cpu_diff()[0] / num / bottom_dim_;
caffe_gpu_scal(count, loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidMultiLabelLossLayer);
} // namespace caffe
| fb353ee7b3e23e65e6ec3d711ab9e1986aaa9ccc.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/sigmoid_multi_label_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidMultiLabelLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const int label_size, const int bottom_dim, Dtype negative_scale, Dtype* label_vector_data, Dtype* loss) {
int num = nthreads / bottom_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
// Set label_vector_data to zeros
int n = index / bottom_dim;
int c = index % bottom_dim;
label_vector_data[n * bottom_dim + c] = Dtype(0);
}
__syncthreads();
CUDA_KERNEL_LOOP(index, nthreads) {
if (index < num * label_size) {
int sample_id = index / label_size;
int label_id = index % label_size;
int label_value = static_cast<int>(label[sample_id * label_size + label_id]);
if (label_value > 0) {
label_vector_data[sample_id * bottom_dim + label_value - 1] = Dtype(1);
}
}
}
__syncthreads();
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index / bottom_dim;
int c = index % bottom_dim;
Dtype* cur_label_vector_data = label_vector_data + n * bottom_dim + c;
const Dtype* cur_prob_data = prob_data + n * bottom_dim + c;
loss[n * bottom_dim + c] = - cur_label_vector_data[0] * log(max(cur_prob_data[0], Dtype(kLOG_THRESHOLD))) - negative_scale * (1 - cur_label_vector_data[0]) * log(max(1 - cur_prob_data[0], Dtype(kLOG_THRESHOLD)));
}
}
template <typename Dtype>
void SigmoidMultiLabelLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
Dtype* label_vector_data = label_vector_.mutable_gpu_data();
int n = prob_.num();
int count = prob_.count();
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
SigmoidMultiLabelLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, prob_data, label, label_size_, bottom_dim_, negative_scale_, label_vector_data, loss_data);
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
top[0]->mutable_cpu_data()[0] = loss / count;
if (top.size() == 2) {
top[1]->ShareData(prob_);
} else if (top.size() == 3) {
top[1]->ShareData(prob_);
top[2]->ShareData(label_vector_);
}
}
template <typename Dtype>
__global__ void SigmoidMultiLabelLossBackwardGPU(const int nthreads, const Dtype* prob,
const Dtype* label_vector_data, const int bottom_dim, Dtype negative_scale, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / bottom_dim;
const int c = index % bottom_dim;
const Dtype label_value = label_vector_data[n * bottom_dim + c];
const Dtype prob_value = prob[n * bottom_dim + c];
bottom_diff[n * bottom_dim + c] = -label_value * (1 - prob_value) + negative_scale * (1 - label_value) * prob_value;
}
}
template <typename Dtype>
void SigmoidMultiLabelLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label_vector_data = label_vector_.gpu_data();
const int num = prob_.num();
const int count = prob_.count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidMultiLabelLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, prob_data, label_vector_data, bottom_dim_, negative_scale_, bottom_diff);
const Dtype loss_weight = top[0]->cpu_diff()[0] / num / bottom_dim_;
caffe_gpu_scal(count, loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidMultiLabelLossLayer);
} // namespace caffe
|
3ca5abc888583ee7cdf2c1d73940b64c38ef8b53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<int ndims, bool iszerocentered> __global__ void ShiftFourierKernel(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* d_delta);
template<int ndims, bool iszerocentered> __global__ void MotionBlurKernel(tfloat* d_output, int3 dims, float3* d_shifts, ushort nshifts);
////////////////////////////////////////
//Equivalent of TOM's tom_shift method//
////////////////////////////////////////
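// Real-space entry point: forward FFT, apply the shift as a phase ramp in Fourier space, inverse FFT.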
void d_Shift(tfloat* d_input, tfloat* d_output, int3 dims, tfloat3* h_delta, hipfftHandle* planforw, hipfftHandle* planback, tcomplex* d_sharedintermediate, int batch)
{
tcomplex* d_intermediate = NULL;
if (d_sharedintermediate == NULL)
hipMalloc((void**)&d_intermediate, batch * ElementsFFT(dims) * sizeof(tcomplex));
else
d_intermediate = d_sharedintermediate;
if (planforw == NULL)
d_FFTR2C(d_input, d_intermediate, DimensionCount(dims), dims, batch);
else
d_FFTR2C(d_input, d_intermediate, planforw);
d_Shift(d_intermediate, d_intermediate, dims, h_delta, false, batch);
if (planback == NULL)
d_IFFTC2R(d_intermediate, d_output, DimensionCount(dims), dims, batch);
else
d_IFFTC2R(d_intermediate, d_output, planback, dims);
if (d_sharedintermediate == NULL)
hipFree(d_intermediate);
}
void d_Shift(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* h_delta, bool iszerocentered, int batch)
{
tfloat3* h_deltanorm = (tfloat3*)malloc(batch * sizeof(tfloat3));
for (int b = 0; b < batch; b++)
h_deltanorm[b] = tfloat3(h_delta[b].x / (tfloat)dims.x, h_delta[b].y / (tfloat)dims.y, h_delta[b].z / (tfloat)dims.z);
tfloat3* d_delta = (tfloat3*)CudaMallocFromHostArray(h_deltanorm, batch * sizeof(tfloat3));
free(h_deltanorm);
int TpB = tmin(256, NextMultipleOf(dims.x / 2 + 1, 32));
dim3 grid = dim3(dims.y, dims.z, batch);
if (!iszerocentered)
{
if (DimensionCount(dims) == 3)
ShiftFourierKernel <3, false> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
else
ShiftFourierKernel <2, false> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
}
else
{
if (DimensionCount(dims) == 3)
ShiftFourierKernel <3, true> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
else
ShiftFourierKernel <2, true> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
}
hipFree(d_delta);
}
void d_MotionBlur(tfloat* d_output, int3 dims, float3* h_shifts, uint nshifts, bool iszerocentered, uint batch)
{
float3* h_deltanorm = (float3*)malloc(nshifts * batch * sizeof(float3));
for (int b = 0; b < nshifts * batch; b++)
h_deltanorm[b] = make_float3(h_shifts[b].x / (float)dims.x, h_shifts[b].y / (float)dims.y, h_shifts[b].z / (float)dims.z);
float3* d_delta = (float3*)CudaMallocFromHostArray(h_deltanorm, nshifts * batch * sizeof(float3));
free(h_deltanorm);
int TpB = tmin(256, NextMultipleOf(dims.x / 2 + 1, 32));
dim3 grid = dim3(dims.y, dims.z, batch);
if (!iszerocentered)
{
if (DimensionCount(dims) == 3)
MotionBlurKernel <3, false> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
else
MotionBlurKernel <2, false> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
}
else
{
if (DimensionCount(dims) == 3)
MotionBlurKernel <3, true> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
else
MotionBlurKernel <2, true> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
}
hipFree(d_delta);
}
////////////////
//CUDA kernels//
////////////////
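// Applies the Fourier shift theorem: each coefficient is multiplied by exp(-2*pi*i * (k . delta)).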
template<int ndims, bool iszerocentered> __global__ void ShiftFourierKernel(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* d_delta)
{
int idy = blockIdx.x;
int idz = blockIdx.y;
int x, y, z;
if (!iszerocentered)
{
y = idy > dims.y / 2 ? idy - dims.y : idy;
z = idz > dims.z / 2 ? idz - dims.z : idz;
}
else
{
y = dims.y / 2 - idy;
z = dims.z / 2 - idz;
}
d_input += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
d_output += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
tfloat3 delta = d_delta[blockIdx.z];
for (int idx = threadIdx.x; idx <= dims.x / 2; idx += blockDim.x)
{
if (!iszerocentered)
x = idx;
else
x = dims.x / 2 - idx;
tfloat factor = -(delta.x * (tfloat)x + delta.y * (tfloat)y + (ndims > 2 ? delta.z * (tfloat)z : (tfloat)0)) * (tfloat)PI2;
tcomplex multiplicator = make_cuComplex(cos(factor), sin(factor));
d_output[idx] = cmul(d_input[idx], multiplicator);
}
}
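// Builds a motion-blur envelope by averaging the unit phasors of the shift trajectory:
// output = |sum_s exp(-2*pi*i * (k . delta_s))| / nshifts for each frequency.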
template<int ndims, bool iszerocentered> __global__ void MotionBlurKernel(tfloat* d_output, int3 dims, float3* d_shifts, ushort nshifts)
{
int idy = blockIdx.x;
int idz = blockIdx.y;
int x, y, z;
if (!iszerocentered)
{
y = FFTShift(idy, dims.y) - dims.y / 2;
z = FFTShift(idz, dims.z) - dims.z / 2;
}
else
{
y = dims.y / 2 - idy;
z = dims.z / 2 - idz;
}
d_output += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
d_shifts += blockIdx.z * nshifts;
for (int idx = threadIdx.x; idx <= dims.x / 2; idx += blockDim.x)
{
if (!iszerocentered)
x = FFTShift(idx, dims.x) - dims.x / 2;
else
x = dims.x / 2 - idx;
float2 shift = make_float2(0, 0);
for (ushort s = 0; s < nshifts; s++)
{
float3 delta = d_shifts[s];
float factor = -(delta.x * (float)x + delta.y * (float)y + (ndims > 2 ? delta.z * (float)z : (float)0)) * (float)PI2;
shift += make_cuComplex(cos(factor), sin(factor));
}
d_output[idx] = length(shift) / (float)nshifts;
}
}
} | 3ca5abc888583ee7cdf2c1d73940b64c38ef8b53.cu | #include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<int ndims, bool iszerocentered> __global__ void ShiftFourierKernel(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* d_delta);
template<int ndims, bool iszerocentered> __global__ void MotionBlurKernel(tfloat* d_output, int3 dims, float3* d_shifts, ushort nshifts);
////////////////////////////////////////
//Equivalent of TOM's tom_shift method//
////////////////////////////////////////
void d_Shift(tfloat* d_input, tfloat* d_output, int3 dims, tfloat3* h_delta, cufftHandle* planforw, cufftHandle* planback, tcomplex* d_sharedintermediate, int batch)
{
tcomplex* d_intermediate = NULL;
if (d_sharedintermediate == NULL)
cudaMalloc((void**)&d_intermediate, batch * ElementsFFT(dims) * sizeof(tcomplex));
else
d_intermediate = d_sharedintermediate;
if (planforw == NULL)
d_FFTR2C(d_input, d_intermediate, DimensionCount(dims), dims, batch);
else
d_FFTR2C(d_input, d_intermediate, planforw);
d_Shift(d_intermediate, d_intermediate, dims, h_delta, false, batch);
if (planback == NULL)
d_IFFTC2R(d_intermediate, d_output, DimensionCount(dims), dims, batch);
else
d_IFFTC2R(d_intermediate, d_output, planback, dims);
if (d_sharedintermediate == NULL)
cudaFree(d_intermediate);
}
void d_Shift(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* h_delta, bool iszerocentered, int batch)
{
tfloat3* h_deltanorm = (tfloat3*)malloc(batch * sizeof(tfloat3));
for (int b = 0; b < batch; b++)
h_deltanorm[b] = tfloat3(h_delta[b].x / (tfloat)dims.x, h_delta[b].y / (tfloat)dims.y, h_delta[b].z / (tfloat)dims.z);
tfloat3* d_delta = (tfloat3*)CudaMallocFromHostArray(h_deltanorm, batch * sizeof(tfloat3));
free(h_deltanorm);
int TpB = tmin(256, NextMultipleOf(dims.x / 2 + 1, 32));
dim3 grid = dim3(dims.y, dims.z, batch);
if (!iszerocentered)
{
if (DimensionCount(dims) == 3)
ShiftFourierKernel <3, false> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
else
ShiftFourierKernel <2, false> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
}
else
{
if (DimensionCount(dims) == 3)
ShiftFourierKernel <3, true> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
else
ShiftFourierKernel <2, true> << <grid, TpB >> > (d_input, d_output, dims, d_delta);
}
cudaFree(d_delta);
}
void d_MotionBlur(tfloat* d_output, int3 dims, float3* h_shifts, uint nshifts, bool iszerocentered, uint batch)
{
float3* h_deltanorm = (float3*)malloc(nshifts * batch * sizeof(float3));
for (int b = 0; b < nshifts * batch; b++)
h_deltanorm[b] = make_float3(h_shifts[b].x / (float)dims.x, h_shifts[b].y / (float)dims.y, h_shifts[b].z / (float)dims.z);
float3* d_delta = (float3*)CudaMallocFromHostArray(h_deltanorm, nshifts * batch * sizeof(float3));
free(h_deltanorm);
int TpB = tmin(256, NextMultipleOf(dims.x / 2 + 1, 32));
dim3 grid = dim3(dims.y, dims.z, batch);
if (!iszerocentered)
{
if (DimensionCount(dims) == 3)
MotionBlurKernel <3, false> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
else
MotionBlurKernel <2, false> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
}
else
{
if (DimensionCount(dims) == 3)
MotionBlurKernel <3, true> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
else
MotionBlurKernel <2, true> << <grid, TpB >> > (d_output, dims, d_delta, nshifts);
}
cudaFree(d_delta);
}
////////////////
//CUDA kernels//
////////////////
template<int ndims, bool iszerocentered> __global__ void ShiftFourierKernel(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat3* d_delta)
{
int idy = blockIdx.x;
int idz = blockIdx.y;
int x, y, z;
if (!iszerocentered)
{
y = idy > dims.y / 2 ? idy - dims.y : idy;
z = idz > dims.z / 2 ? idz - dims.z : idz;
}
else
{
y = dims.y / 2 - idy;
z = dims.z / 2 - idz;
}
d_input += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
d_output += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
tfloat3 delta = d_delta[blockIdx.z];
for (int idx = threadIdx.x; idx <= dims.x / 2; idx += blockDim.x)
{
if (!iszerocentered)
x = idx;
else
x = dims.x / 2 - idx;
tfloat factor = -(delta.x * (tfloat)x + delta.y * (tfloat)y + (ndims > 2 ? delta.z * (tfloat)z : (tfloat)0)) * (tfloat)PI2;
tcomplex multiplicator = make_cuComplex(cos(factor), sin(factor));
d_output[idx] = cmul(d_input[idx], multiplicator);
}
}
template<int ndims, bool iszerocentered> __global__ void MotionBlurKernel(tfloat* d_output, int3 dims, float3* d_shifts, ushort nshifts)
{
int idy = blockIdx.x;
int idz = blockIdx.y;
int x, y, z;
if (!iszerocentered)
{
y = FFTShift(idy, dims.y) - dims.y / 2;
z = FFTShift(idz, dims.z) - dims.z / 2;
}
else
{
y = dims.y / 2 - idy;
z = dims.z / 2 - idz;
}
d_output += ((blockIdx.z * dims.z + idz) * dims.y + idy) * (dims.x / 2 + 1);
d_shifts += blockIdx.z * nshifts;
for (int idx = threadIdx.x; idx <= dims.x / 2; idx += blockDim.x)
{
if (!iszerocentered)
x = FFTShift(idx, dims.x) - dims.x / 2;
else
x = dims.x / 2 - idx;
float2 shift = make_float2(0, 0);
for (ushort s = 0; s < nshifts; s++)
{
float3 delta = d_shifts[s];
float factor = -(delta.x * (float)x + delta.y * (float)y + (ndims > 2 ? delta.z * (float)z : (float)0)) * (float)PI2;
shift += make_cuComplex(cos(factor), sin(factor));
}
d_output[idx] = length(shift) / (float)nshifts;
}
}
} |
bda8c9012333c7097d1ec3318da3f919268358a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
comp += var_2 - +1.9203E-11f + var_3;
float tmp_1 = (var_4 + (var_5 * (var_6 * var_7 / (-1.1925E-35f - var_8))));
comp += tmp_1 / (var_9 * sqrtf(ceilf(var_10 / (+1.9585E-4f - atanf((var_11 / var_12))))));
for (int i=0; i < var_1; ++i) {
comp += (-1.8728E28f * (var_14 / -1.1331E-35f));
var_13[i] = -1.1449E36f;
comp += var_13[i] / (+1.6099E19f * (-1.1344E-22f / (+1.8134E-44f * (+0.0f + powf(sinhf((var_15 * (var_16 * atanf(+1.4159E-42f)))), (-1.5757E-19f - -1.4257E-13f))))));
}
if (comp >= (var_17 * var_18)) {
float tmp_2 = (var_19 - var_20 - +1.2701E10f / +1.8868E-42f + (-1.5387E36f + var_21));
comp += tmp_2 - (-1.3896E-37f * floorf((var_22 / +1.5989E-44f)));
float tmp_3 = +0.0f;
comp += tmp_3 + (var_23 + acosf(-1.2172E35f / -1.2134E-41f - var_24 * var_25 - var_26 + var_27));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float* tmp_14 = initPointer( atof(argv[14]) );
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
hipDeviceSynchronize();
return 0;
}
| bda8c9012333c7097d1ec3318da3f919268358a3.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
comp += var_2 - +1.9203E-11f + var_3;
float tmp_1 = (var_4 + (var_5 * (var_6 * var_7 / (-1.1925E-35f - var_8))));
comp += tmp_1 / (var_9 * sqrtf(ceilf(var_10 / (+1.9585E-4f - atanf((var_11 / var_12))))));
for (int i=0; i < var_1; ++i) {
comp += (-1.8728E28f * (var_14 / -1.1331E-35f));
var_13[i] = -1.1449E36f;
comp += var_13[i] / (+1.6099E19f * (-1.1344E-22f / (+1.8134E-44f * (+0.0f + powf(sinhf((var_15 * (var_16 * atanf(+1.4159E-42f)))), (-1.5757E-19f - -1.4257E-13f))))));
}
if (comp >= (var_17 * var_18)) {
float tmp_2 = (var_19 - var_20 - +1.2701E10f / +1.8868E-42f + (-1.5387E36f + var_21));
comp += tmp_2 - (-1.3896E-37f * floorf((var_22 / +1.5989E-44f)));
float tmp_3 = +0.0f;
comp += tmp_3 + (var_23 + acosf(-1.2172E35f / -1.2134E-41f - var_24 * var_25 - var_26 + var_27));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float* tmp_14 = initPointer( atof(argv[14]) );
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
cudaDeviceSynchronize();
return 0;
}
|
8d311875cbc472f2af924e9349f96a3035344db9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/Array.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/DistributionTemplates.h>
#include <ATen/native/Resize.h>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// glu forward
// -----------------------------------
void glu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const acc_t a = a_;
const acc_t b = b_;
const acc_t one = acc_t(1);
const acc_t sigmoid = one / (one + ::exp(-b));
return a * sigmoid;
});
});
}
// -----------------------------------
// glu backward
// -----------------------------------
// Byte offsets don't require multiplication by sizeof(T), so are slightly cheaper.
// For fixed offsets, this removes all penalty from 64-bit indexing.
template <typename T>
__device__ T* byte_offset(T* ptr, int64_t offset) {
using byte_ptr_t = typename std::conditional<
std::is_const<T>::value, const char*, char*>::type;
return reinterpret_cast<T*>(
reinterpret_cast<byte_ptr_t>(ptr) + offset
);
}
template <typename scalar_t, typename OffsetCalc>
__global__ void glu_backward_kernel(
int numel, scalar_t* gI, const scalar_t* I, const scalar_t* gO,
OffsetCalc offset_calculator,
int64_t gI_byte_offset, int64_t I_byte_offset) {
using acc_t = at::acc_type<scalar_t, true>;
const uint32_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= numel) {
return;
}
const auto offsets = offset_calculator.get(linear_index);
// We explicitly iterate over the first half of the input tensor, and
// gI_byte_offset and I_byte_offset are the offsets to access the
// corresponding index in the second half of the tensor.
const acc_t a = I[offsets[1]];
const acc_t b = *byte_offset(I + offsets[1], I_byte_offset);
const acc_t gO_val = gO[offsets[2]];
const auto one = acc_t(1);
const acc_t sigmoid = one / (one + ::exp(-b));
auto* gA = gI + offsets[0];
*gA = sigmoid * gO_val;
auto* gB = byte_offset(gA, gI_byte_offset);
*gB = (one - sigmoid) * sigmoid * gO_val * a;
}
void launch_glu_backward_kernel(const TensorIteratorBase& iter,
int64_t gI_stride, int64_t I_stride) {
const auto N = iter.numel();
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(N > 0 && N <= std::numeric_limits<int32_t>::max());
const auto offset_calculator = make_element_offset_calculator<3>(iter);
constexpr int64_t block_size = 256;
const int64_t grid = (N + block_size - 1) / block_size;
const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "glu_backward_cuda", [&] {
auto gI = static_cast<scalar_t*>(iter.data_ptr(0));
auto I = static_cast<const scalar_t*>(iter.data_ptr(1));
auto gO = static_cast<const scalar_t*>(iter.data_ptr(2));
hipLaunchKernelGGL(( glu_backward_kernel), dim3(grid), dim3(block_size), 0, stream,
N, gI, I, gO, offset_calculator,
gI_stride * sizeof(scalar_t), I_stride * sizeof(scalar_t));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
Tensor& glu_backward_cuda_out(const Tensor& grad_output, const Tensor& input,
int64_t dim, Tensor& grad_input) {
TORCH_CHECK(input.dim() > 0, "glu does not support 0-dimensional tensors");
auto wrap_dim = maybe_wrap_dim(dim, input.dim());
auto input_sizes = input.sizes();
const int64_t nIn = input_sizes[wrap_dim];
TORCH_CHECK(nIn % 2 == 0, "Halving dimension must be even, but dimension ",
wrap_dim, " is size ", nIn);
resize_output(grad_input, input_sizes);
DimVector iter_shape(input_sizes);
const auto dim_size = nIn / 2;
iter_shape[wrap_dim] = dim_size;
TORCH_CHECK(grad_output.sizes() == IntArrayRef{iter_shape});
const auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
.add_input(input)
.add_input(grad_output)
.resize_outputs(false)
.declare_static_shape(iter_shape)
.build();
if (iter.numel() == 0) {
return grad_input;
}
const auto I_stride = input.strides()[wrap_dim] * dim_size;
const auto gI_stride = grad_input.strides()[wrap_dim] * dim_size;
if (iter.can_use_32bit_indexing()) {
launch_glu_backward_kernel(iter, gI_stride, I_stride);
} else {
for (auto sub_iter: iter.with_32bit_indexing()) {
launch_glu_backward_kernel(sub_iter, gI_stride, I_stride);
}
}
return grad_input;
}
Tensor glu_backward_cuda(const Tensor& grad_output, const Tensor& input, int64_t dim) {
auto grad_input = at::empty({0}, input.options());
return glu_backward_cuda_out(grad_output, input, dim, grad_input);
}
// -----------------------------------
// log_sigmoid forward
// -----------------------------------
std::tuple<Tensor&, Tensor&> log_sigmoid_forward_out_cuda(const Tensor& input, Tensor& result, Tensor& buffer) {
// NOTE: buffer is only used by CPU dispatch, we just ignore it here
auto iter = TensorIteratorConfig()
.add_output(result)
.add_input(input)
.build();
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(),
"log_sigmoid_forward_cuda", [&] {
using acc_t = acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t in_) -> scalar_t {
const acc_t in = in_;
const auto max = ::max(acc_t(0), -in);
const auto z = ::exp(-max) + ::exp(-in - max);
return -(max + ::log(z));
});
});
return std::forward_as_tuple(result, buffer);
}
std::tuple<Tensor, Tensor> log_sigmoid_forward_cuda(const Tensor& input) {
auto result = at::empty_like(input);
auto buffer = at::empty({0}, input.options());
log_sigmoid_forward_out_cuda(input, result, buffer);
return std::forward_as_tuple(result, buffer);
}
// -----------------------------------
// log_sigmoid backward
// -----------------------------------
void log_sigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(),
"log_sigmoid_backward_cuda", [&] {
using acc_t = acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t in_, scalar_t grad_out_) -> scalar_t {
const acc_t in = in_;
const acc_t grad_out = grad_out_;
const auto max = ::max(acc_t(0), -in);
const auto z = ::exp(-max) + ::exp(-in - max);
auto in_negative = in < acc_t(0);
auto max_deriv = in_negative ? acc_t(1) : acc_t(0);
auto sign = in_negative ? acc_t(1) : -acc_t(1);
return grad_out * (max_deriv - sign * (acc_t(1) - acc_t(1) / z));
});
});
}
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// rrelu
// -----------------------------------
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
double range = upper - lower;
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// ensure that (&rand.x)[ii] is safe
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor, "");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(0);
}
}
__syncthreads();
}
}
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (std::is_same<scalar_t, double>::value) {
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 2>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform2_double(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
// half and float
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 4>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform4(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = ::exp(b * beta);
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::hip::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::hip::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc * c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
const T_ACC t_acc =
c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
});
});
}
} // namespace
TORCH_IMPL_FUNC(gelu_out_cuda) (
const Tensor& self, const Tensor& result
) {
GeluCUDAKernelImpl(*this);
}
TORCH_IMPL_FUNC(gelu_backward_out_cuda) (
const Tensor& grad, const Tensor& self, const Tensor& grad_input
) {
GeluBackwardCUDAKernelImpl(*this);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(glu_stub, &glu_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
| 8d311875cbc472f2af924e9349f96a3035344db9.cu | #define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <ATen/native/Resize.h>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// glu forward
// -----------------------------------
void glu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const acc_t a = a_;
const acc_t b = b_;
const acc_t one = acc_t(1);
const acc_t sigmoid = one / (one + std::exp(-b));
return a * sigmoid;
});
});
}
// -----------------------------------
// glu backward
// -----------------------------------
// Byte offsets don't require multiplication by sizeof(T), so are slightly cheaper.
// For fixed offsets, this removes all penalty from 64-bit indexing.
template <typename T>
__device__ T* byte_offset(T* ptr, int64_t offset) {
using byte_ptr_t = typename std::conditional<
std::is_const<T>::value, const char*, char*>::type;
return reinterpret_cast<T*>(
reinterpret_cast<byte_ptr_t>(ptr) + offset
);
}
template <typename scalar_t, typename OffsetCalc>
__global__ void glu_backward_kernel(
int numel, scalar_t* gI, const scalar_t* I, const scalar_t* gO,
OffsetCalc offset_calculator,
int64_t gI_byte_offset, int64_t I_byte_offset) {
using acc_t = at::acc_type<scalar_t, true>;
const uint32_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= numel) {
return;
}
const auto offsets = offset_calculator.get(linear_index);
// We explicitly iterate over the first half of the input tensor, and
// gI_byte_offset and I_byte_offset are the offsets to access the
// corresponding index in the second half of the tensor.
const acc_t a = I[offsets[1]];
const acc_t b = *byte_offset(I + offsets[1], I_byte_offset);
const acc_t gO_val = gO[offsets[2]];
const auto one = acc_t(1);
const acc_t sigmoid = one / (one + std::exp(-b));
auto* gA = gI + offsets[0];
*gA = sigmoid * gO_val;
auto* gB = byte_offset(gA, gI_byte_offset);
*gB = (one - sigmoid) * sigmoid * gO_val * a;
}
void launch_glu_backward_kernel(const TensorIteratorBase& iter,
int64_t gI_stride, int64_t I_stride) {
const auto N = iter.numel();
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(N > 0 && N <= std::numeric_limits<int32_t>::max());
const auto offset_calculator = make_element_offset_calculator<3>(iter);
constexpr int64_t block_size = 256;
const int64_t grid = (N + block_size - 1) / block_size;
const auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "glu_backward_cuda", [&] {
auto gI = static_cast<scalar_t*>(iter.data_ptr(0));
auto I = static_cast<const scalar_t*>(iter.data_ptr(1));
auto gO = static_cast<const scalar_t*>(iter.data_ptr(2));
glu_backward_kernel<<<grid, block_size, 0, stream>>>(
N, gI, I, gO, offset_calculator,
gI_stride * sizeof(scalar_t), I_stride * sizeof(scalar_t));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
Tensor& glu_backward_cuda_out(const Tensor& grad_output, const Tensor& input,
int64_t dim, Tensor& grad_input) {
TORCH_CHECK(input.dim() > 0, "glu does not support 0-dimensional tensors");
auto wrap_dim = maybe_wrap_dim(dim, input.dim());
auto input_sizes = input.sizes();
const int64_t nIn = input_sizes[wrap_dim];
TORCH_CHECK(nIn % 2 == 0, "Halving dimension must be even, but dimension ",
wrap_dim, " is size ", nIn);
resize_output(grad_input, input_sizes);
DimVector iter_shape(input_sizes);
const auto dim_size = nIn / 2;
iter_shape[wrap_dim] = dim_size;
TORCH_CHECK(grad_output.sizes() == IntArrayRef{iter_shape});
const auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
.add_input(input)
.add_input(grad_output)
.resize_outputs(false)
.declare_static_shape(iter_shape)
.build();
if (iter.numel() == 0) {
return grad_input;
}
const auto I_stride = input.strides()[wrap_dim] * dim_size;
const auto gI_stride = grad_input.strides()[wrap_dim] * dim_size;
if (iter.can_use_32bit_indexing()) {
launch_glu_backward_kernel(iter, gI_stride, I_stride);
} else {
for (auto sub_iter: iter.with_32bit_indexing()) {
launch_glu_backward_kernel(sub_iter, gI_stride, I_stride);
}
}
return grad_input;
}
Tensor glu_backward_cuda(const Tensor& grad_output, const Tensor& input, int64_t dim) {
auto grad_input = at::empty({0}, input.options());
return glu_backward_cuda_out(grad_output, input, dim, grad_input);
}
// -----------------------------------
// log_sigmoid forward
// -----------------------------------
std::tuple<Tensor&, Tensor&> log_sigmoid_forward_out_cuda(const Tensor& input, Tensor& result, Tensor& buffer) {
// NOTE: buffer is only used by CPU dispatch, we just ignore it here
auto iter = TensorIteratorConfig()
.add_output(result)
.add_input(input)
.build();
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(),
"log_sigmoid_forward_cuda", [&] {
using acc_t = acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t in_) -> scalar_t {
const acc_t in = in_;
const auto max = std::max(acc_t(0), -in);
const auto z = std::exp(-max) + std::exp(-in - max);
return -(max + std::log(z));
});
});
return std::forward_as_tuple(result, buffer);
}
std::tuple<Tensor, Tensor> log_sigmoid_forward_cuda(const Tensor& input) {
auto result = at::empty_like(input);
auto buffer = at::empty({0}, input.options());
log_sigmoid_forward_out_cuda(input, result, buffer);
return std::forward_as_tuple(result, buffer);
}
// -----------------------------------
// log_sigmoid backward
// -----------------------------------
void log_sigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(),
"log_sigmoid_backward_cuda", [&] {
using acc_t = acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t in_, scalar_t grad_out_) -> scalar_t {
const acc_t in = in_;
const acc_t grad_out = grad_out_;
const auto max = std::max(acc_t(0), -in);
const auto z = std::exp(-max) + std::exp(-in - max);
auto in_negative = in < acc_t(0);
auto max_deriv = in_negative ? acc_t(1) : acc_t(0);
auto sign = in_negative ? acc_t(1) : -acc_t(1);
return grad_out * (max_deriv - sign * (acc_t(1) - acc_t(1) / z));
});
});
}
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// rrelu
// -----------------------------------
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
double range = upper - lower;
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// ensure that (&rand.x)[ii] is safe
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor, "");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(0);
}
}
__syncthreads();
}
}
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::cuda::getCurrentCUDAStream();
if (std::is_same<scalar_t, double>::value) {
rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform2_double(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// half and float
rrelu_with_noise_cuda_kernel<scalar_t, 4><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform4(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = std::exp(b * beta);
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
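// Note on the backward kernel above: the forward negative branch is
// y = negcoef * (exp(x * negiptcoef) - 1), so dy/dx = negiptcoef * negcoef * exp(x * negiptcoef).
// When `is_result` is set only y is available, and exp(x * negiptcoef) = y / negcoef + 1
// gives the equivalent form negiptcoef * (y + negcoef) used in the first branch.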
namespace {
void GeluCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::cuda::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
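// Note on the two kernels above: the forward pass computes gelu(x) = x * Phi(x), with Phi
// the standard normal CDF, and the backward pass applies
//   d/dx [x * Phi(x)] = Phi(x) + x * phi(x),   phi(x) = exp(-x^2 / 2) / sqrt(2 * pi).
// kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5 = (2 / sqrt(pi)) * (1 / sqrt(2)) / 2 = 1 / sqrt(2 * pi),
// so `pdf` is exactly phi(x) and the kernel returns dy * (cdf + x * pdf).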
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
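// Note on the two kernels above: hardsigmoid(x) = clamp(x + 3, 0, 6) / 6 is 0 for x <= -3,
// 1 for x >= 3, and linear with slope 1/6 in between, so the backward kernel passes
// grad_val / 6 on the open interval (-3, 3) and zero elsewhere.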
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc * c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
const T_ACC t_acc =
c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
});
});
}
} // namespace
TORCH_IMPL_FUNC(gelu_out_cuda) (
const Tensor& self, const Tensor& result
) {
GeluCUDAKernelImpl(*this);
}
TORCH_IMPL_FUNC(gelu_backward_out_cuda) (
const Tensor& grad, const Tensor& self, const Tensor& grad_input
) {
GeluBackwardCUDAKernelImpl(*this);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(glu_stub, &glu_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
|
da72650c7675c316f6806d96d258e9f9551ac799.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#elif (__COMPUTE_CAPABILITY__ >= 200)
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else
#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
enum KernelType {
INTERIOR_KERNEL = 5,
EXTERIOR_KERNEL_X = 0,
EXTERIOR_KERNEL_Y = 1,
EXTERIOR_KERNEL_Z = 2,
EXTERIOR_KERNEL_T = 3
};
namespace quda {
struct DslashParam {
int threads; // the desired number of active threads
int parity; // Even-Odd or Odd-Even
int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
int ghostDim[QUDA_MAX_DIM]; // Whether a ghost zone has been allocated for a given dimension
int ghostOffset[QUDA_MAX_DIM+1];
int ghostNormOffset[QUDA_MAX_DIM+1];
int X[4];
KernelType kernel_type; //is it INTERIOR_KERNEL, EXTERIOR_KERNEL_X/Y/Z/T
int sp_stride; // spinor stride
#ifdef GPU_STAGGERED_DIRAC
int gauge_stride;
int long_gauge_stride;
float fat_link_max;
#endif
#ifdef USE_TEXTURE_OBJECTS
hipTextureObject_t inTex;
hipTextureObject_t inTexNorm;
hipTextureObject_t xTex;
hipTextureObject_t xTexNorm;
hipTextureObject_t outTex;
hipTextureObject_t outTexNorm;
hipTextureObject_t gauge0Tex; // also applies to fat gauge
hipTextureObject_t gauge1Tex; // also applies to fat gauge
hipTextureObject_t longGauge0Tex;
hipTextureObject_t longGauge1Tex;
hipTextureObject_t longPhase0Tex;
hipTextureObject_t longPhase1Tex;
hipTextureObject_t cloverTex;
hipTextureObject_t cloverNormTex;
hipTextureObject_t cloverInvTex;
hipTextureObject_t cloverInvNormTex;
#endif
};
DslashParam dslashParam;
// these are set in initDslashConst
int Vspatial;
#ifdef PTHREADS
static hipEvent_t interiorDslashEnd;
#endif
static hipEvent_t packEnd[Nstream];
static hipEvent_t gatherStart[Nstream];
static hipEvent_t gatherEnd[Nstream];
static hipEvent_t scatterStart[Nstream];
static hipEvent_t scatterEnd[Nstream];
static hipEvent_t dslashStart;
static hipEvent_t dslashEnd;
static FaceBuffer *face[2];
static cudaColorSpinorField *inSpinor;
static FullClover *inClover = NULL;
static FullClover *inCloverInv = NULL;
// For tuneLaunch() to uniquely identify a suitable set of launch parameters, we need copies of a few of
// the constants set by initDslashConstants().
static struct {
int x[4];
int Ls;
unsigned long long VolumeCB() { return x[0]*x[1]*x[2]*x[3]/2; }
// In the future, we may also want to add gauge_fixed, sp_stride, ga_stride, cl_stride, etc.
} dslashConstants;
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to hipMemcpy()
static bool kernelPackT = false;
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
//these params are needed for twisted mass (in particular, for packing twisted spinor)
static bool twistPack = false;
void setTwistPack(bool flag) { twistPack = flag; }
bool getTwistPack() { return twistPack; }
#ifdef MULTI_GPU
static double twist_a = 0.0;
static double twist_b = 0.0;
#endif
#include <dslash_textures.h>
#include <dslash_constants.h>
#if defined(DIRECT_ACCESS_LINK) || defined(DIRECT_ACCESS_WILSON_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_ACCUM) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_INTER) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_CLOVER)
static inline __device__ float short2float(short a) {
return (float)a/MAX_SHORT;
}
static inline __device__ short float2short(float c, float a) {
return (short)(a*c*MAX_SHORT);
}
static inline __device__ short4 float42short4(float c, float4 a) {
return make_short4(float2short(c, a.x), float2short(c, a.y), float2short(c, a.z), float2short(c, a.w));
}
static inline __device__ float4 short42float4(short4 a) {
return make_float4(short2float(a.x), short2float(a.y), short2float(a.z), short2float(a.w));
}
static inline __device__ float2 short22float2(short2 a) {
return make_float2(short2float(a.x), short2float(a.y));
}
#endif // DIRECT_ACCESS inclusions
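// Note on the helpers above: half-precision fields store each component as a short in
// [-MAX_SHORT, MAX_SHORT] representing a value in [-1, 1]; short2float divides by
// MAX_SHORT to recover that value, and float2short(c, a) stores a * c, where c is
// typically the reciprocal of the per-site norm, so that the pair (short data, norm)
// reproduces a up to rounding.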
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#include <pack_face_def.h> // kernels for packing the ghost zones and general indexing
#include <staggered_dslash_def.h> // staggered Dslash kernels
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#include <dw_dslash_def.h> // Domain Wall kernels
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#include <mdw_dslash4_def.h> // Dslash4, intermediate operator for Mobius Mat_4 kernels
#include <mdw_dslash4pre_def.h> // Dslash4pre, intermediate operator for Mobius Mat_4 kernels
#include <mdw_dslash5_def.h> // Dslash5 Mobius Domain Wall kernels
#include <mdw_dslash5inv_def.h> // Dslash5inv Mobius Domain Wall kernels
#include <tm_dslash_def.h> // Twisted Mass kernels
#include <tm_core.h> // solo twisted mass kernel
#include <clover_def.h> // kernels for applying the clover term alone
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#include <tmc_dslash_def.h> // Twisted Clover kernels
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef CLOVER_SHARED_FLOATS_PER_THREAD
#define CLOVER_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
void setFace(const FaceBuffer &Face1, const FaceBuffer &Face2) {
face[0] = (FaceBuffer*)&(Face1);
face[1] = (FaceBuffer*)&(Face2); // nasty
}
static int it = 0;
void createDslashEvents()
{
// add hipEventDisableTiming for lower sync overhead
for (int i=0; i<Nstream; i++) {
hipEventCreateWithFlags(&packEnd[i], hipEventDisableTiming);
hipEventCreateWithFlags(&gatherStart[i], hipEventDisableTiming);
hipEventCreateWithFlags(&gatherEnd[i], hipEventDisableTiming);
hipEventCreateWithFlags(&scatterStart[i], hipEventDisableTiming);
hipEventCreateWithFlags(&scatterEnd[i], hipEventDisableTiming);
}
hipEventCreateWithFlags(&dslashStart, hipEventDisableTiming);
hipEventCreateWithFlags(&dslashEnd, hipEventDisableTiming);
#ifdef PTHREADS
hipEventCreateWithFlags(&interiorDslashEnd, hipEventDisableTiming);
#endif
checkCudaError();
}
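// Sketch of how these events are consumed (the stream names below are placeholders, not
// identifiers from this file): an event created with hipEventDisableTiming only orders work,
// e.g.
// hipEventRecord(packEnd[i], packStream); // mark completion of the packing kernel
// hipStreamWaitEvent(gatherStream, packEnd[i], 0); // gather may start once packing is done
// Disabling timing avoids the extra overhead of timestamped events when they are used purely
// for synchronization.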
void destroyDslashEvents()
{
for (int i=0; i<Nstream; i++) {
hipEventDestroy(packEnd[i]);
hipEventDestroy(gatherStart[i]);
hipEventDestroy(gatherEnd[i]);
hipEventDestroy(scatterStart[i]);
hipEventDestroy(scatterEnd[i]);
}
hipEventDestroy(dslashStart);
hipEventDestroy(dslashEnd);
#ifdef PTHREADS
hipEventDestroy(interiorDslashEnd);
#endif
checkCudaError();
}
#define MORE_GENERIC_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
}
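// For reference, instantiating the macro above with FUNC=dslash, DAG=Dagger, X=Xpay,
// kernel_type=INTERIOR_KERNEL, x != 0 and reconstruct == QUDA_RECONSTRUCT_12 produces
// (schematically)
// hipLaunchKernelGGL((dslash12DaggerXpayKernel<INTERIOR_KERNEL>),
// dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param);
// i.e. the HIP spelling of dslash12DaggerXpayKernel<INTERIOR_KERNEL><<<...>>>(..., param).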
#define MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
hipLaunchKernelGGL((FUNC ## 13 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
hipLaunchKernelGGL((FUNC ## 9 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
hipLaunchKernelGGL((FUNC ## 13 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
hipLaunchKernelGGL((FUNC ## 9 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
// macro used for staggered dslash
#define STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
GENERIC_STAGGERED_DSLASH(staggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__)
#define IMPROVED_STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
GENERIC_STAGGERED_DSLASH(improvedStaggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__)
#define MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
}
#ifndef MULTI_GPU
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define ASYM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_ASYM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_ASYM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//macro used for twisted mass dslash:
#define MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x == 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
} else if (x != 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
} else if (x == 0 && d != 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
hipLaunchKernelGGL((FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
hipLaunchKernelGGL((FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
hipLaunchKernelGGL((FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
#define NDEG_TM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_NDEG_TM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_NDEG_TM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//end of tm dslash macro
// Use an abstract class interface to drive the different CUDA dslash
// kernels. All parameters are curried into the derived classes to
// allow a simple interface.
class DslashCuda : public Tunable {
protected:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
const cudaColorSpinorField *x;
const QudaReconstructType reconstruct;
char *saveOut, *saveOutNorm;
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char aux[6][256];
void fillAux(KernelType kernel_type, const char *kernel_str) {
strcpy(aux[kernel_type],kernel_str);
#ifdef MULTI_GPU
char comm[5];
comm[0] = (dslashParam.commDim[0] ? '1' : '0');
comm[1] = (dslashParam.commDim[1] ? '1' : '0');
comm[2] = (dslashParam.commDim[2] ? '1' : '0');
comm[3] = (dslashParam.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux[kernel_type],",comm=");
strcat(aux[kernel_type],comm);
if (kernel_type == INTERIOR_KERNEL) {
char ghost[5];
ghost[0] = (dslashParam.ghostDim[0] ? '1' : '0');
ghost[1] = (dslashParam.ghostDim[1] ? '1' : '0');
ghost[2] = (dslashParam.ghostDim[2] ? '1' : '0');
ghost[3] = (dslashParam.ghostDim[3] ? '1' : '0');
ghost[4] = '\0';
strcat(aux[kernel_type],",ghost=");
strcat(aux[kernel_type],ghost);
}
#endif
if (reconstruct == QUDA_RECONSTRUCT_NO)
strcat(aux[kernel_type],",reconstruct=18");
else if (reconstruct == QUDA_RECONSTRUCT_13)
strcat(aux[kernel_type],",reconstruct=13");
else if (reconstruct == QUDA_RECONSTRUCT_12)
strcat(aux[kernel_type],",reconstruct=12");
else if (reconstruct == QUDA_RECONSTRUCT_9)
strcat(aux[kernel_type],",reconstruct=9");
else if (reconstruct == QUDA_RECONSTRUCT_8)
strcat(aux[kernel_type],",reconstruct=8");
if (x) strcat(aux[kernel_type],",Xpay");
}
public:
DslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaReconstructType reconstruct)
: out(out), in(in), x(x), reconstruct(reconstruct), saveOut(0), saveOutNorm(0) {
#ifdef MULTI_GPU
fillAux(INTERIOR_KERNEL, "type=interior");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU");
#endif // MULTI_GPU
}
virtual ~DslashCuda() { }
virtual TuneKey tuneKey() const
{ return TuneKey(in->VolString(), typeid(*this).name(), aux[dslashParam.kernel_type]); }
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual int Nface() { return 2; }
virtual void preTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
saveOut = new char[in->Bytes()];
hipMemcpy(saveOut, out->V(), in->Bytes(), hipMemcpyDeviceToHost);
if (out->Precision() == QUDA_HALF_PRECISION) {
saveOutNorm = new char[in->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), in->NormBytes(), hipMemcpyDeviceToHost);
}
}
}
virtual void postTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
hipMemcpy(out->V(), saveOut, in->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (out->Precision() == QUDA_HALF_PRECISION) {
hipMemcpy(out->Norm(), saveOutNorm, in->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
};
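// Illustrative driver pattern for the class above (a sketch only; the precision template
// arguments and variable names are placeholders):
// WilsonDslashCuda<double2, double2> dslash(out, gauge0, gauge1, reconstruct, in, x, a, dagger);
// dslash.apply(stream); // tuneLaunch() inside apply() selects the block size on first use
// preTune()/postTune() save and restore `out` because the exterior kernels accumulate into it,
// so the repeated launches performed while tuning would otherwise corrupt the result.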
/** This derived class is specifically for driving the Dslash kernels
that use shared memory blocking. This only applies on Fermi and
upwards, and only for the interior kernels. */
#if (__COMPUTE_CAPABILITY__ >= 200 && defined(SHARED_WILSON_DSLASH))
class SharedDslashCuda : public DslashCuda {
protected:
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; } // FIXME: this isn't quite true, but works
bool advanceSharedBytes(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceSharedBytes(param);
else return false;
} // FIXME - shared memory tuning only supported on exterior kernels
/** Helper function to set the shared memory size from the 3-d block size */
int sharedBytes(const dim3 &block) const {
int warpSize = 32; // FIXME - query from device properties
int block_xy = block.x*block.y;
if (block_xy % warpSize != 0) block_xy = ((block_xy / warpSize) + 1)*warpSize;
return block_xy*block.z*sharedBytesPerThread();
}
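// Example of the padding above: block = (14, 4, 2) gives block_xy = 56, which is rounded
// up to 64 (two full warps), so the request is 64 * 2 * sharedBytesPerThread() bytes.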
/** Helper function to set the 3-d grid size from the 3-d block size */
dim3 createGrid(const dim3 &block) const {
unsigned int gx = ((dslashConstants.x[0]/2)*dslashConstants.x[3] + block.x - 1) / block.x;
unsigned int gy = (dslashConstants.x[1] + block.y - 1 ) / block.y;
unsigned int gz = (dslashConstants.x[2] + block.z - 1) / block.z;
return dim3(gx, gy, gz);
}
/** Advance the 3-d block size. */
bool advanceBlockDim(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceBlockDim(param);
const unsigned int min_threads = 2;
const unsigned int max_threads = 512; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int max_shared = 16384*3; // FIXME: use deviceProp.sharedMemPerBlock;
// set the x-block dimension equal to the entire x dimension
bool set = false;
dim3 blockInit = param.block;
blockInit.z++;
for (unsigned bx=blockInit.x; bx<=dslashConstants.x[0]/2; bx++) {
//unsigned int gx = (dslashConstants.x[0]*dslashConstants.x[3] + bx - 1) / bx;
for (unsigned by=blockInit.y; by<=dslashConstants.x[1]; by++) {
unsigned int gy = (dslashConstants.x[1] + by - 1 ) / by;
if (by > 1 && (by%2) != 0) continue; // can't handle odd blocks yet except by=1
for (unsigned bz=blockInit.z; bz<=dslashConstants.x[2]; bz++) {
unsigned int gz = (dslashConstants.x[2] + bz - 1) / bz;
if (bz > 1 && (bz%2) != 0) continue; // can't handle odd blocks yet except bz=1
if (bx*by*bz > max_threads) continue;
if (bx*by*bz < min_threads) continue;
// can't yet handle the last block properly in shared memory addressing
if (by*gy != dslashConstants.x[1]) continue;
if (bz*gz != dslashConstants.x[2]) continue;
if (sharedBytes(dim3(bx, by, bz)) > max_shared) continue;
param.block = dim3(bx, by, bz);
set = true; break;
}
if (set) break;
blockInit.z = 1;
}
if (set) break;
blockInit.y = 1;
}
if (param.block.x > dslashConstants.x[0]/2 && param.block.y > dslashConstants.x[1] &&
param.block.z > dslashConstants.x[2] || !set) {
//||sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
return false;
} else {
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
return true;
}
}
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaReconstructType reconstruct)
: DslashCuda(out, in, x, reconstruct) { ; }
virtual ~SharedDslashCuda() { ; }
std::string paramString(const TuneParam ¶m) const // override and print out grid as well
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "grid=(" << param.grid.x << "," << param.grid.y << "," << param.grid.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual void initTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::initTuneParam(param);
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
}
/** Sets default values for when tuning is disabled - this is guaranteed to work, but will be slow */
virtual void defaultTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) DslashCuda::defaultTuneParam(param);
else initTuneParam(param);
}
};
#else /** For pre-Fermi architectures */
class SharedDslashCuda : public DslashCuda {
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, QudaReconstructType reconstruct)
: DslashCuda(out, in, x, reconstruct) { }
virtual ~SharedDslashCuda() { }
};
#endif
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common input
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1),
dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class CloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
CloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~CloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(cloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1872ll : 1824ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x->V(), (float*)x->Norm(), a);
}
long long flops() const { return 1872ll * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
void setTwistParam(double &a, double &b, const double &kappa, const double &mu,
const int dagger, const QudaTwistGamma5Type twist) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
} else {
errorQuda("Twist type %d not defined\n", twist);
}
if (dagger) a *= -1.0;
}
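// Worked example for the parameters above: the twist term is (1 + i*a*gamma_5) with
// a = 2*kappa*mu, and its inverse is (1 - i*a*gamma_5) / (1 + a*a), which is why the
// inverse case flips the sign of a and sets b = 1/(1 + a*a). For kappa = 0.12 and
// mu = 0.05 this gives a = -0.012 and b = 1/(1 + 0.000144) ~ 0.99986.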
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
const int dagger;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
#endif
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1),
dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
strcat(key.aux,",TwistInvDslash");
break;
case QUDA_DEG_DSLASH_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
strcat(key.aux,",DslashTwist");
break;
case QUDA_NONDEG_DSLASH:
strcat(key.aux,",NdegDslash");
break;
}
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
case QUDA_NONDEG_DSLASH:
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistCloverDslashType dslashType;
const int dagger;
double a, b, c, d;
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2),
dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
strcat(key.aux,",CloverTwistInvDslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
strcat(key.aux,",DslashCloverTwist");
break;
}
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat>
class DomainWallDslashCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm;
const double a;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
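// Note on the tuner above: block.x runs over the 4-d checkerboarded volume
// (dslashParam.threads) and block.y over the fifth dimension, which is why block.y is
// capped at in->X(4) and the grid is rebuilt as a 2-d launch after every advance.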
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
return (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB()*dslashConstants.Ls + 96ll*bulk + 120ll*wall;
}
};
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm;
const double a;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB();
break;
case 1:
flops_Tmp = 96ll*bulk + 120ll*wall;
break;
case 2:
flops_Tmp = 144ll*dslashConstants.VolumeCB()*dslashConstants.Ls
+ 3ll*dslashConstants.Ls*(dslashConstants.Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
// Dslash class definition for the Mobius domain wall fermion
template <typename sFloat, typename gFloat>
class MDWFDslashPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm, a;
double *b5, *c5;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
MDWFDslashPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~MDWFDslashPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(MDWFDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(MDWFDslash4pre, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(MDWFDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 3:
DSLASH(MDWFDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB();
break;
case 1:
flops_Tmp = 168ll*bulk + 72ll*wall;
break;
case 2:
flops_Tmp = 144ll*bulk + 72ll*wall;
break;
case 3:
flops_Tmp = 144ll*dslashConstants.VolumeCB()*dslashConstants.Ls
+ 3ll*dslashConstants.Ls*(dslashConstants.Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
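// Trait mapping a vector type to its underlying real scalar type (e.g. double2 -> double).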
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
const phaseFloat *phase0, *phase1;
const int dagger;
const double a;
QudaDslashType type;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0, const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct), fat0(fat0), fat1(fat1), long0(long0), long1(long1), phase0(phase0), phase1(phase1),
dagger(dagger), a(a), type(long0 ? QUDA_ASQTAD_DSLASH : QUDA_STAGGERED_DSLASH)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if (type == QUDA_STAGGERED_DSLASH) {
STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), fat0, fat1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
} else {
IMPROVED_STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(),
fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
}
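// Nface counts forward plus backward faces per dimension: the naive staggered
// operator exchanges a depth-1 halo (2 faces), the improved action a depth-3
// halo (6 faces), so the comms routines receive Nface()/2 as the halo depth.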
int Nface() { return type == QUDA_STAGGERED_DSLASH ? 2 : 6; }
long long flops() const {
long long flops;
if (type == QUDA_STAGGERED_DSLASH)
flops = (x ? 666ll : 654ll) * dslashConstants.VolumeCB();
else
flops = (x ? 1158ll : 1146ll) * dslashConstants.VolumeCB();
return flops;
}
};
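// Book-keeping for the dynamically scheduled halo exchange: one entry per
// stream (two per partitioned dimension, plus the interior compute stream).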
int gatherCompleted[Nstream];
int previousDir[Nstream];
int commsCompleted[Nstream];
int dslashCompleted[Nstream];
int commDimTotal;
/**
* Initialize the arrays used for the dynamic scheduling.
*/
void inline initDslashCommsPattern() {
for (int i=0; i<Nstream-1; i++) {
#ifndef GPU_COMMS
gatherCompleted[i] = 0;
#else
gatherCompleted[i] = 1;
#endif
commsCompleted[i] = 0;
dslashCompleted[i] = 0;
}
gatherCompleted[Nstream-1] = 1;
commsCompleted[Nstream-1] = 1;
// We need to know which was the previous direction in which
// communication was issued, since we only query a given event /
// comms call after the one for the previous direction has
// successfully completed.
for (int i=3; i>=0; i--) {
if (dslashParam.commDim[i]) {
int prev = Nstream-1;
for (int j=3; j>i; j--) if (dslashParam.commDim[j]) prev = 2*j;
previousDir[2*i + 1] = prev;
previousDir[2*i + 0] = 2*i + 1; // always valid
}
}
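// Example: if only the X and T dimensions are partitioned, the chain built
// above is previousDir[7] = Nstream-1, previousDir[6] = 7, previousDir[1] = 6,
// previousDir[0] = 1, i.e. each query waits on the previously issued direction.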
// this tells us how many events / comms occurrences there are in
// total. Used for exiting the while loop
commDimTotal = 0;
for (int i=3; i>=0; i--) commDimTotal += dslashParam.commDim[i];
#ifndef GPU_COMMS
commDimTotal *= 4; // 2 pipeline stages (gather + comms) times 2 directions
#else
commDimTotal *= 2; // comms only (no separate gather stage) times 2 directions
#endif
}
#define PROFILE(f, profile, idx) \
profile.Start(idx); \
f; \
profile.Stop(idx);
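// Note that f is pasted verbatim between Start and Stop, so a declaration made
// inside f (e.g. the hipError_t event_test below) stays in the enclosing scope.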
void dslashCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
initDslashCommsPattern();
// Record the start of the dslash
PROFILE(hipEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(face[it]->recvStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || getKernelPackT() || getTwistPack()))
{ pack = true; break; }
// Initialize pack from source spinor
if (inCloverInv == NULL) {
PROFILE(face[it]->pack(*inSpinor, 1-parity, dagger, streams, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face[it]->pack(*inSpinor, *inClover, *inCloverInv, 1-parity, dagger,
streams, twist_a, twist_b), profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(hipEventRecord(packEnd[0], streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
hipEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(hipStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(face[it]->gather(*inSpinor, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(hipEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
//hipError_t event_test;
//event_test = hipEventQuery(gatherEnd[2*i+dir]);
PROFILE(hipError_t event_test = hipEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (hipSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(face[it]->sendStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = face[it]->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face[it]->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(hipEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(hipStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
#ifdef PTHREADS
#include <pthread.h>
struct ReceiveParam
{
TimeProfile* profile;
int nFace;
int dagger;
};
void *issueMPIReceive(void* receiveParam)
{
ReceiveParam* param = static_cast<ReceiveParam*>(receiveParam);
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(inSpinor->recvStart(param->nFace, 2*i+dir, param->dagger), (*(param->profile)), QUDA_PROFILE_COMMS_START);
}
}
return NULL;
}
struct InteriorParam
{
TimeProfile* profile;
DslashCuda* dslash;
int current_device;
};
void* launchInteriorKernel(void* interiorParam)
{
InteriorParam* param = static_cast<InteriorParam*>(interiorParam);
hipSetDevice(param->current_device); // set device in the new thread
PROFILE(param->dslash->apply(streams[Nstream-1]), (*(param->profile)), QUDA_PROFILE_DSLASH_KERNEL);
return NULL;
}
#endif
void dslashCuda2(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
// Record the start of the dslash if doing communication in T and not kernel packing
#ifndef PTHREADS
if (dslashParam.commDim[3] && !(getKernelPackT() || getTwistPack()))
#endif
{
PROFILE(hipEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
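// Ghost buffers and message handles are sized for a halo of depth Nface()/2
// (1 for Wilson-type operators, 3 for the improved staggered action).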
inSpinor->allocateGhostBuffer(dslash.Nface()/2);
inSpinor->createComms(dslash.Nface()/2);
initDslashCommsPattern();
inSpinor->streamInit(streams);
#ifdef PTHREADS // create two new threads to issue MPI receives
// and launch the interior dslash kernel
const int packIndex = Nstream-2;
//const int packIndex = Nstream-1;
pthread_t receiveThread, interiorThread;
ReceiveParam receiveParam;
receiveParam.profile = &profile;
receiveParam.nFace = (dslash.Nface() >> 1);
receiveParam.dagger = dagger;
if(pthread_create(&receiveThread, NULL, issueMPIReceive, &receiveParam)){
errorQuda("pthread_create failed");
}
InteriorParam interiorParam;
interiorParam.dslash = &dslash;
interiorParam.profile = &profile;
hipGetDevice(&(interiorParam.current_device)); // get the current device number
// PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
if(pthread_create(&interiorThread, NULL, launchInteriorKernel, &interiorParam)){
errorQuda("pthread_create failed");
}
#else // single CPU thread per MPI process
const int packIndex = Nstream-1;
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(inSpinor->recvStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
}
}
#endif
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || getKernelPackT() || getTwistPack()))
{ pack = true; break; }
// if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
#ifdef PTHREADS
if (pack){
PROFILE(hipStreamWaitEvent(streams[packIndex], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
}
#endif
// Initialize pack from source spinor
if (inCloverInv == NULL) {
PROFILE(inSpinor->pack(dslash.Nface()/2, 1-parity, dagger, packIndex, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(inSpinor->pack(*inClover, *inCloverInv, dslash.Nface()/2, 1-parity, dagger, packIndex, twist_a),
profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(hipEventRecord(packEnd[0], streams[packIndex]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
#ifndef GPU_COMMS
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
hipEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(hipStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(inSpinor->gather(dslash.Nface()/2, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(hipEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif // GPU_COMMS
#endif // MULTI_GPU
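// Launch the interior dslash from this thread unless a dedicated interior
// pthread was spawned above.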
#if (!defined MULTI_GPU) || (!defined PTHREADS)
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#endif
#ifdef MULTI_GPU
#ifdef PTHREADS
if(pthread_join(receiveThread, NULL)) errorQuda("pthread_join failed");
// if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
#endif
#ifdef GPU_COMMS
bool pack_event = false;
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
if ((i!=3 || getKernelPackT() || getTwistPack()) && !pack_event) {
hipEventSynchronize(packEnd[0]);
pack_event = true;
} else {
hipEventSynchronize(dslashStart);
}
for (int dir=1; dir>=0; dir--) {
PROFILE(inSpinor->sendStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
inSpinor->commsQuery(dslash.Nface()/2, 2*i+dir, dagger); // do a comms query to ensure MPI has begun
}
}
#endif
bool interiorLaunched = false;
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
#ifndef GPU_COMMS
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
PROFILE(hipError_t event_test = hipEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (hipSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(inSpinor->sendStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
}
}
#endif
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = inSpinor->commsQuery(dslash.Nface()/2, 2*i+dir, dagger),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
#ifndef GPU_COMMS
PROFILE(inSpinor->scatter(dslash.Nface()/2, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
#endif
}
}
} // dir=0,1
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
#ifndef GPU_COMMS
PROFILE(hipEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
#ifdef PTHREADS
if(!interiorLaunched){
if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
interiorLaunched = true;
}
#endif
// wait for scattering to finish and then launch dslash
PROFILE(hipStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
#endif
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
/**
Variation of multi-gpu dslash where the packing kernel writes
buffers directly to host memory
*/
void dslashZeroCopyCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
initDslashCommsPattern();
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(face[it]->recvStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
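// Force T-direction kernel packing: in the zero-copy variant the packing
// kernel writes the halos of every partitioned dimension directly into
// host-mapped buffers.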
setKernelPackT(true);
// Record the end of the packing
PROFILE(hipEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
PROFILE(hipStreamWaitEvent(streams[0], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize pack from source spinor
PROFILE(face[it]->pack(*inSpinor, 1-parity, dagger, streams, true, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
// Record the end of the packing
PROFILE(hipEventRecord(packEnd[0], streams[0]),
profile, QUDA_PROFILE_EVENT_RECORD);
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
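// spin until the packing kernel has finished writing the host buffers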
int doda=0;
while (doda++>=0) {
PROFILE(hipError_t event_test = hipEventQuery(packEnd[0]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (event_test == hipSuccess) doda=-1;
}
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
PROFILE(face[it]->sendStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
int completeSum = 0;
commDimTotal /= 2; // pipeline is shorter for the zero-copy variant (no separate gather stage)
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]]) {
PROFILE(int comms_test = face[it]->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face[it]->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(hipEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(hipStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
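// Interior-only launch with no halo exchange, used for kernels that act purely
// in the fifth dimension and therefore need no 4-d ghost zones.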
void dslashCudaNC(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
profile.Stop(QUDA_PROFILE_TOTAL);
}
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
void cloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new CloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new CloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new CloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
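// For the inverse-twist dslash the boundary packing must also apply the twist,
// so kernel twist-packing is enabled and kappa/mu are passed via twist_a/twist_b.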
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inClover = (FullClover*) clover;
inCloverInv = (FullClover*) cloverInv;
#ifdef GPU_TWISTED_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 2.*mu*kappa;
#endif
/*
#ifdef MULTI_GPU
if(type == QUDA_DEG_CLOVER_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
*/
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP, *cloverNormP, *cloverInvP, *cloverInvNormP;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
}
// dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
/*
#ifdef MULTI_GPU
if(type == QUDA_DEG_CLOVER_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
*/
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
errorQuda("Twisted clover dslash has not been built");
#endif
}
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only space-time partitioning is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// An additional argument (DS_type) selects which kernel to launch.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only space-time partitioning is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
if(DS_type != 0)
dslashCudaNC(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
else
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
//-----------------------------------------------------
// Modification for 4D preconditioned Mobius DWF operator
// An additional argument (DS_type) selects which kernel to launch.
//
// pre-defined DS_type list
// 0 = MDWF dslash4
// 1 = MDWF dslash4pre
// 2 = MDWF dslash5
// 3 = MDWF dslash5inv
//-----------------------------------------------------
void MDWFDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only space-time partitioning is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new MDWFDslashPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new MDWFDslashPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new MDWFDslashPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
if(DS_type != 0)
dslashCudaNC(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
else
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
void staggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity,
const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
dslashParam.sp_stride = in->Stride();
dslashParam.gauge_stride = gauge.Stride();
dslashParam.fat_link_max = gauge.LinkMax(); // May need to use this in the preconditioning step
// in the solver for the improved staggered action
for(int i=0;i<4;i++){
dslashParam.X[i] = in->X()[i];
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
dslashParam.X[0] *= 2; // because color spinor fields are defined on a half lattice
void *gauge0, *gauge1;
bindFatGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision()) {
errorQuda("Mixing precisions gauge=%d and spinor=%d not supported",
gauge.Precision(), in->Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)gauge0, (double2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)gauge0, (float2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)gauge0, (short2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindFatGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
void
improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
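// The improved action uses three-hop long links, so a partitioned dimension
// needs a local extent of at least 6 sites to hold the two depth-3 halos.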
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.sp_stride = in->Stride();
dslashParam.parity = parity;
dslashParam.gauge_stride = fatGauge.Stride();
dslashParam.long_gauge_stride = longGauge.Stride();
dslashParam.fat_link_max = fatGauge.LinkMax();
for(int i=0;i<4;i++){
dslashParam.X[i] = in->X()[i];
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
dslashParam.X[0] *= 2;
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported"
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindFatGaugeTex(fatGauge);
unbindLongGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
template <typename sFloat, typename cFloat>
class CloverCuda : public Tunable {
private:
cudaColorSpinorField *out;
float *outNorm;
char *saveOut, *saveOutNorm;
const cFloat *clover;
const float *cloverNorm;
const cudaColorSpinorField *in;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return CLOVER_SHARED_FLOATS_PER_THREAD * reg_size;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
CloverCuda(cudaColorSpinorField *out, const cFloat *clover, const float *cloverNorm,
const cudaColorSpinorField *in)
: out(out), clover(clover), cloverNorm(cloverNorm), in(in)
{
bindSpinorTex<sFloat>(in);
}
virtual ~CloverCuda() { unbindSpinorTex<sFloat>(in); }
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
hipLaunchKernelGGL(( cloverKernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name()); }
// Need to save the out field if it aliases the in field
void preTune() {
if (in == out) {
saveOut = new char[out->Bytes()];
hipMemcpy(saveOut, out->V(), out->Bytes(), hipMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), hipMemcpyDeviceToHost);
}
}
}
// Restore if the in and out fields alias
void postTune() {
if (in == out) {
hipMemcpy(out->V(), saveOut, out->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
hipMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 504ll * dslashConstants.VolumeCB(); }
};
void cloverCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover clover,
const cudaColorSpinorField *in, const int parity) {
dslashParam.parity = parity;
dslashParam.threads = in->Volume();
#ifdef GPU_CLOVER_DIRAC
Tunable *clov = 0;
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(clover, parity, &cloverP, &cloverNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
clov = new CloverCuda<double2, double2>(out, (double2*)cloverP, (float*)cloverNormP, in);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
clov = new CloverCuda<float4, float4>(out, (float4*)cloverP, (float*)cloverNormP, in);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
clov = new CloverCuda<short4, short4>(out, (short4*)cloverP, (float*)cloverNormP, in);
}
clov->apply(0);
unbindCloverTex(clover);
checkCudaError();
delete clov;
#else
errorQuda("Clover dslash has not been built");
#endif
}
template <typename sFloat>
class TwistGamma5Cuda : public Tunable {
private:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type twist) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, twist);
else{//twist doublet
a = kappa, b = mu, c = epsilon;
}
}
virtual ~TwistGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), in->AuxString()); }
void apply(const hipStream_t &stream)
{
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) {
hipLaunchKernelGGL(( twistGamma5Kernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a, b,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
} else {
hipLaunchKernelGGL(( twistGamma5Kernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a, b, c,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
hipMemcpy(saveOut, out->V(), out->Bytes(), hipMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), hipMemcpyDeviceToHost);
}
}
void postTune() {
hipMemcpy(out->V(), saveOut, out->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
hipMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); }
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
//! also covers the non-degenerate (flavor-doublet) twisted mass:
void twistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const int dagger, const double &kappa, const double &mu, const double &epsilon, const QudaTwistGamma5Type twist)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
dslashParam.threads = in->Volume() / 2;
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
Tunable *twistGamma5 = 0;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
twistGamma5 = new TwistGamma5Cuda<double2>(out, in, kappa, mu, epsilon, dagger, twist);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<float4>(out, in, kappa, mu, epsilon, dagger, twist);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<short4>(out, in, kappa, mu, epsilon, dagger, twist);
}
twistGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete twistGamma5;
#else
errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
#include "dslash_core/tmc_gamma_core.h"
template <typename cFloat, typename sFloat>
class TwistCloverGamma5Cuda : public Tunable {
private:
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
QudaTwistGamma5Type twist;
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistCloverGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type tw,
cFloat *clov, const float *cN, cFloat *clovInv, const float *cN2) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
twist = tw;
clover = clov;
cNorm = cN;
cloverInv = clovInv;
cNrm2 = cN2;
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, tw);
// a = 2.*kappa*mu;
else{//twist doublet
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
}
virtual ~TwistCloverGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const {
return TuneKey(in->VolString(), typeid(*this).name(), in->AuxString());
}
void apply(const hipStream_t &stream)
{
//A.S.: should this be GPU_TWISTED_CLOVER_DIRAC instead?
#if (defined GPU_TWISTED_CLOVER_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) { //Idea for the kernel, two spinor inputs (IN and clover applied IN), on output (Clover applied IN + ig5IN)
if (twist == QUDA_TWIST_GAMMA5_DIRECT)
hipLaunchKernelGGL(( twistCloverGamma5Kernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam,
clover, cNorm, cloverInv, cNrm2);
else if (twist == QUDA_TWIST_GAMMA5_INVERSE)
hipLaunchKernelGGL(( twistCloverGamma5InvKernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam,
clover, cNorm, cloverInv, cNrm2);
} else {
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
hipMemcpy(saveOut, out->V(), out->Bytes(), hipMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), hipMemcpyDeviceToHost);
}
}
void postTune() {
hipMemcpy(out->V(), saveOut, out->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
hipMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); } //TODO FIX THIS NUMBER!!!
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
void twistCloverGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in, const int dagger, const double &kappa, const double &mu,
const double &epsilon, const QudaTwistGamma5Type twist, const FullClover *clov, const FullClover *clovInv, const int parity)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
errorQuda("Twisted doublet not supported in twisted clover dslash");
#ifdef GPU_TWISTED_CLOVER_DIRAC
Tunable *tmClovGamma5 = 0;
void *clover, *cNorm, *cloverInv, *cNorm2;
QudaPrecision clover_prec = bindTwistedCloverTex(*clov, *clovInv, parity, &clover, &cNorm, &cloverInv, &cNorm2);
if (in->Precision() != clover_prec)
errorQuda("ERROR: Clover precision and spinor precision do not match\n");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
tmClovGamma5 = new TwistCloverGamma5Cuda<double2,double2>(out, in, kappa, mu, epsilon, dagger, twist, (double2 *) clover, (float *) cNorm, (double2 *) cloverInv, (float *) cNorm2);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
tmClovGamma5 = new TwistCloverGamma5Cuda<float4,float4>(out, in, kappa, mu, epsilon, dagger, twist, (float4 *) clover, (float *) cNorm, (float4 *) cloverInv, (float *) cNorm2);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
tmClovGamma5 = new TwistCloverGamma5Cuda<short4,short4>(out, in, kappa, mu, epsilon, dagger, twist, (short4 *) clover, (float *) cNorm, (short4 *) cloverInv, (float *) cNorm2);
}
tmClovGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete tmClovGamma5;
unbindTwistedCloverTex(*clov);
#else
errorQuda("Twisted clover dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
} // namespace quda
#include "misc_helpers.hip"
#if defined(GPU_FATLINK) || defined(GPU_GAUGE_FORCE) || defined(GPU_FERMION_FORCE) // || defined(GPU_UNITARIZE)
#include <force_common.h>
#endif
#ifdef GPU_FATLINK
#include "llfat_quda.cu"
#endif
#ifdef GPU_GAUGE_FORCE
#include "gauge_force_quda.hip"
#endif
#ifdef GPU_FERMION_FORCE
#include "fermion_force_quda.cu"
#endif
| da72650c7675c316f6806d96d258e9f9551ac799.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#elif (__COMPUTE_CAPABILITY__ >= 200)
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else
#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
enum KernelType {
INTERIOR_KERNEL = 5,
EXTERIOR_KERNEL_X = 0,
EXTERIOR_KERNEL_Y = 1,
EXTERIOR_KERNEL_Z = 2,
EXTERIOR_KERNEL_T = 3
};
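// Note on the numbering above: the four exterior kernels occupy values 0..3 and
// INTERIOR_KERNEL is 5, so checks of the form "kernel_type < 5" (used in the tuning
// preTune()/postTune() hooks further down) select the exterior launches.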
namespace quda {
struct DslashParam {
int threads; // the desired number of active threads
int parity; // Even-Odd or Odd-Even
int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
int ghostDim[QUDA_MAX_DIM]; // Whether a ghost zone has been allocated for a given dimension
int ghostOffset[QUDA_MAX_DIM+1];
int ghostNormOffset[QUDA_MAX_DIM+1];
int X[4];
KernelType kernel_type; //is it INTERIOR_KERNEL, EXTERIOR_KERNEL_X/Y/Z/T
int sp_stride; // spinor stride
#ifdef GPU_STAGGERED_DIRAC
int gauge_stride;
int long_gauge_stride;
float fat_link_max;
#endif
#ifdef USE_TEXTURE_OBJECTS
cudaTextureObject_t inTex;
cudaTextureObject_t inTexNorm;
cudaTextureObject_t xTex;
cudaTextureObject_t xTexNorm;
cudaTextureObject_t outTex;
cudaTextureObject_t outTexNorm;
cudaTextureObject_t gauge0Tex; // also applies to fat gauge
cudaTextureObject_t gauge1Tex; // also applies to fat gauge
cudaTextureObject_t longGauge0Tex;
cudaTextureObject_t longGauge1Tex;
cudaTextureObject_t longPhase0Tex;
cudaTextureObject_t longPhase1Tex;
cudaTextureObject_t cloverTex;
cudaTextureObject_t cloverNormTex;
cudaTextureObject_t cloverInvTex;
cudaTextureObject_t cloverInvNormTex;
#endif
};
DslashParam dslashParam;
// these are set in initDslashConst
int Vspatial;
#ifdef PTHREADS
static cudaEvent_t interiorDslashEnd;
#endif
static cudaEvent_t packEnd[Nstream];
static cudaEvent_t gatherStart[Nstream];
static cudaEvent_t gatherEnd[Nstream];
static cudaEvent_t scatterStart[Nstream];
static cudaEvent_t scatterEnd[Nstream];
static cudaEvent_t dslashStart;
static cudaEvent_t dslashEnd;
static FaceBuffer *face[2];
static cudaColorSpinorField *inSpinor;
static FullClover *inClover = NULL;
static FullClover *inCloverInv = NULL;
// For tuneLaunch() to uniquely identify a suitable set of launch parameters, we need copies of a few of
// the constants set by initDslashConstants().
static struct {
int x[4];
int Ls;
unsigned long long VolumeCB() { return x[0]*x[1]*x[2]*x[3]/2; }
// In the future, we may also want to add gauge_fixed, sp_stride, ga_stride, cl_stride, etc.
} dslashConstants;
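// Illustrative arithmetic only (the lattice dimensions are set at runtime): for a
// local volume of 32*32*32*64 sites, VolumeCB() = 32*32*32*64/2 = 1048576, which is
// the value the dslash classes below report via minThreads().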
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to cudaMemcpy()
static bool kernelPackT = false;
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
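// Hypothetical caller sketch (not part of this file): the interface layer is expected
// to toggle this flag around a multi-GPU application of the operator, e.g.
//   setKernelPackT(true);    // pack the T-face ghost zone with a gather kernel
//   /* ... apply dslash ... */
//   setKernelPackT(false);   // revert to cudaMemcpy-based packing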
//these params are needed for twisted mass (in particular, for packing twisted spinor)
static bool twistPack = false;
void setTwistPack(bool flag) { twistPack = flag; }
bool getTwistPack() { return twistPack; }
#ifdef MULTI_GPU
static double twist_a = 0.0;
static double twist_b = 0.0;
#endif
#include <dslash_textures.h>
#include <dslash_constants.h>
#if defined(DIRECT_ACCESS_LINK) || defined(DIRECT_ACCESS_WILSON_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_ACCUM) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_INTER) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_CLOVER)
static inline __device__ float short2float(short a) {
return (float)a/MAX_SHORT;
}
static inline __device__ short float2short(float c, float a) {
return (short)(a*c*MAX_SHORT);
}
static inline __device__ short4 float42short4(float c, float4 a) {
return make_short4(float2short(c, a.x), float2short(c, a.y), float2short(c, a.z), float2short(c, a.w));
}
static inline __device__ float4 short42float4(short4 a) {
return make_float4(short2float(a.x), short2float(a.y), short2float(a.z), short2float(a.w));
}
static inline __device__ float2 short22float2(short2 a) {
return make_float2(short2float(a.x), short2float(a.y));
}
#endif // DIRECT_ACCESS inclusions
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#include <pack_face_def.h> // kernels for packing the ghost zones and general indexing
#include <staggered_dslash_def.h> // staggered Dslash kernels
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#include <dw_dslash_def.h> // Domain Wall kernels
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#include <mdw_dslash4_def.h> // Dslash4, intermediate operator for Mobius Mat_4 kernels
#include <mdw_dslash4pre_def.h> // Dslash4pre, intermediate operator for Mobius Mat_4 kernels
#include <mdw_dslash5_def.h> // Dslash5 Mobius Domain Wall kernels
#include <mdw_dslash5inv_def.h> // Dslash5inv Mobius Domain Wall kernels
#include <tm_dslash_def.h> // Twisted Mass kernels
#include <tm_core.h> // solo twisted mass kernel
#include <clover_def.h> // kernels for applying the clover term alone
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#include <tmc_dslash_def.h> // Twisted Clover kernels
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef CLOVER_SHARED_FLOATS_PER_THREAD
#define CLOVER_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
void setFace(const FaceBuffer &Face1, const FaceBuffer &Face2) {
face[0] = (FaceBuffer*)&(Face1);
face[1] = (FaceBuffer*)&(Face2); // nasty
}
static int it = 0;
void createDslashEvents()
{
// add cudaEventDisableTiming for lower sync overhead
for (int i=0; i<Nstream; i++) {
cudaEventCreate(&packEnd[i], cudaEventDisableTiming);
cudaEventCreate(&gatherStart[i], cudaEventDisableTiming);
cudaEventCreate(&gatherEnd[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterStart[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterEnd[i], cudaEventDisableTiming);
}
cudaEventCreateWithFlags(&dslashStart, cudaEventDisableTiming);
cudaEventCreateWithFlags(&dslashEnd, cudaEventDisableTiming);
#ifdef PTHREADS
cudaEventCreateWithFlags(&interiorDslashEnd, cudaEventDisableTiming);
#endif
checkCudaError();
}
void destroyDslashEvents()
{
for (int i=0; i<Nstream; i++) {
cudaEventDestroy(packEnd[i]);
cudaEventDestroy(gatherStart[i]);
cudaEventDestroy(gatherEnd[i]);
cudaEventDestroy(scatterStart[i]);
cudaEventDestroy(scatterEnd[i]);
}
cudaEventDestroy(dslashStart);
cudaEventDestroy(dslashEnd);
#ifdef PTHREADS
cudaEventDestroy(interiorDslashEnd);
#endif
checkCudaError();
}
#define MORE_GENERIC_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#define MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
FUNC ## 13 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
FUNC ## 9 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
FUNC ## 13 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
FUNC ## 9 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
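// Example of how DSLASH resolves (read directly off the branches above): with
// dagger == 0, x != 0 and reconstruct == QUDA_RECONSTRUCT_12, the invocation
//   DSLASH(dslash, grid, block, shared, stream, param, args...)
// expands to dslash12XpayKernel<kernel_type><<<grid, block, shared, stream>>>(args..., param).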
// macro used for staggered dslash
#define STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
GENERIC_STAGGERED_DSLASH(staggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__)
#define IMPROVED_STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
GENERIC_STAGGERED_DSLASH(improvedStaggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__)
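// Unlike DSLASH above, the staggered wrappers also dispatch on QUDA_RECONSTRUCT_13 and
// QUDA_RECONSTRUCT_9 (see MORE_GENERIC_STAGGERED_DSLASH) and use Axpy, rather than Xpay,
// as the accumulate suffix selected when x != 0.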
#define MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
}
#ifndef MULTI_GPU
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define ASYM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_ASYM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_ASYM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//macro used for twisted mass dslash:
#define MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x == 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else { \
FUNC ## 8 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else if (x != 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Twist ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Twist ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Twist ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else if (x == 0 && d != 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else{ \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
#define NDEG_TM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_NDEG_TM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_NDEG_TM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//end of tm dslash macro
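// Example of how NDEG_TM_DSLASH resolves (read directly off the branches above): with
// dagger == 0, x == 0, d == 0 and reconstruct == QUDA_RECONSTRUCT_12,
//   NDEG_TM_DSLASH(twistedNdegMassDslash, ...)
// launches twistedNdegMassDslash12TwistKernel<kernel_type>; a non-zero d drops the Twist
// infix and a non-zero x appends the Xpay suffix.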
// Use an abstract class interface to drive the different CUDA dslash
// kernels. All parameters are curried into the derived classes to
// allow a simple interface.
class DslashCuda : public Tunable {
protected:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
const cudaColorSpinorField *x;
const QudaReconstructType reconstruct;
char *saveOut, *saveOutNorm;
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char aux[6][256];
void fillAux(KernelType kernel_type, const char *kernel_str) {
strcpy(aux[kernel_type],kernel_str);
#ifdef MULTI_GPU
char comm[5];
comm[0] = (dslashParam.commDim[0] ? '1' : '0');
comm[1] = (dslashParam.commDim[1] ? '1' : '0');
comm[2] = (dslashParam.commDim[2] ? '1' : '0');
comm[3] = (dslashParam.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux[kernel_type],",comm=");
strcat(aux[kernel_type],comm);
if (kernel_type == INTERIOR_KERNEL) {
char ghost[5];
ghost[0] = (dslashParam.ghostDim[0] ? '1' : '0');
ghost[1] = (dslashParam.ghostDim[1] ? '1' : '0');
ghost[2] = (dslashParam.ghostDim[2] ? '1' : '0');
ghost[3] = (dslashParam.ghostDim[3] ? '1' : '0');
ghost[4] = '\0';
strcat(aux[kernel_type],",ghost=");
strcat(aux[kernel_type],ghost);
}
#endif
if (reconstruct == QUDA_RECONSTRUCT_NO)
strcat(aux[kernel_type],",reconstruct=18");
else if (reconstruct == QUDA_RECONSTRUCT_13)
strcat(aux[kernel_type],",reconstruct=13");
else if (reconstruct == QUDA_RECONSTRUCT_12)
strcat(aux[kernel_type],",reconstruct=12");
else if (reconstruct == QUDA_RECONSTRUCT_9)
strcat(aux[kernel_type],",reconstruct=9");
else if (reconstruct == QUDA_RECONSTRUCT_8)
strcat(aux[kernel_type],",reconstruct=8");
if (x) strcat(aux[kernel_type],",Xpay");
}
public:
DslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaReconstructType reconstruct)
: out(out), in(in), x(x), reconstruct(reconstruct), saveOut(0), saveOutNorm(0) {
#ifdef MULTI_GPU
fillAux(INTERIOR_KERNEL, "type=interior");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU");
#endif // MULTI_GPU
}
virtual ~DslashCuda() { }
virtual TuneKey tuneKey() const
{ return TuneKey(in->VolString(), typeid(*this).name(), aux[dslashParam.kernel_type]); }
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual int Nface() { return 2; }
virtual void preTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
saveOut = new char[in->Bytes()];
cudaMemcpy(saveOut, out->V(), in->Bytes(), cudaMemcpyDeviceToHost);
if (out->Precision() == QUDA_HALF_PRECISION) {
saveOutNorm = new char[in->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), in->NormBytes(), cudaMemcpyDeviceToHost);
}
}
}
virtual void postTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
cudaMemcpy(out->V(), saveOut, in->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (out->Precision() == QUDA_HALF_PRECISION) {
cudaMemcpy(out->Norm(), saveOutNorm, in->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
};
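// Minimal usage sketch (illustrative only, not a verbatim driver): interface-level code
// constructs a concrete dslash object, applies it on a stream, and can query the flop
// count, e.g.
//   WilsonDslashCuda<double2, double2> dslash(out, gauge0, gauge1, reconstruct, in, x, a, dagger);
//   dslash.apply(stream);              // stream: some cudaStream_t chosen by the caller
//   long long work = dslash.flops();   // estimated work for this application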
/** This derived class is specifically for driving the Dslash kernels
that use shared memory blocking. This only applies on Fermi and
upwards, and only for the interior kernels. */
#if (__COMPUTE_CAPABILITY__ >= 200 && defined(SHARED_WILSON_DSLASH))
class SharedDslashCuda : public DslashCuda {
protected:
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; } // FIXME: this isn't quite true, but works
bool advanceSharedBytes(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceSharedBytes(param);
else return false;
} // FIXME - shared memory tuning only supported on exterior kernels
/** Helper function to set the shared memory size from the 3-d block size */
int sharedBytes(const dim3 &block) const {
int warpSize = 32; // FIXME - query from device properties
int block_xy = block.x*block.y;
if (block_xy % warpSize != 0) block_xy = ((block_xy / warpSize) + 1)*warpSize;
return block_xy*block.z*sharedBytesPerThread();
}
/** Helper function to set the 3-d grid size from the 3-d block size */
dim3 createGrid(const dim3 &block) const {
unsigned int gx = ((dslashConstants.x[0]/2)*dslashConstants.x[3] + block.x - 1) / block.x;
unsigned int gy = (dslashConstants.x[1] + block.y - 1 ) / block.y;
unsigned int gz = (dslashConstants.x[2] + block.z - 1) / block.z;
return dim3(gx, gy, gz);
}
/** Advance the 3-d block size. */
bool advanceBlockDim(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceBlockDim(param);
const unsigned int min_threads = 2;
const unsigned int max_threads = 512; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int max_shared = 16384*3; // FIXME: use deviceProp.sharedMemPerBlock;
// set the x-block dimension equal to the entire x dimension
bool set = false;
dim3 blockInit = param.block;
blockInit.z++;
for (unsigned bx=blockInit.x; bx<=dslashConstants.x[0]/2; bx++) {
//unsigned int gx = (dslashConstants.x[0]*dslashConstants.x[3] + bx - 1) / bx;
for (unsigned by=blockInit.y; by<=dslashConstants.x[1]; by++) {
unsigned int gy = (dslashConstants.x[1] + by - 1 ) / by;
if (by > 1 && (by%2) != 0) continue; // can't handle odd blocks yet except by=1
for (unsigned bz=blockInit.z; bz<=dslashConstants.x[2]; bz++) {
unsigned int gz = (dslashConstants.x[2] + bz - 1) / bz;
if (bz > 1 && (bz%2) != 0) continue; // can't handle odd blocks yet except bz=1
if (bx*by*bz > max_threads) continue;
if (bx*by*bz < min_threads) continue;
// can't yet handle the last block properly in shared memory addressing
if (by*gy != dslashConstants.x[1]) continue;
if (bz*gz != dslashConstants.x[2]) continue;
if (sharedBytes(dim3(bx, by, bz)) > max_shared) continue;
param.block = dim3(bx, by, bz);
set = true; break;
}
if (set) break;
blockInit.z = 1;
}
if (set) break;
blockInit.y = 1;
}
if (param.block.x > dslashConstants.x[0]/2 && param.block.y > dslashConstants.x[1] &&
param.block.z > dslashConstants.x[2] || !set) {
//||sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
return false;
} else {
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
return true;
}
}
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaReconstructType reconstruct)
: DslashCuda(out, in, x, reconstruct) { ; }
virtual ~SharedDslashCuda() { ; }
std::string paramString(const TuneParam ¶m) const // override and print out grid as well
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "grid=(" << param.grid.x << "," << param.grid.y << "," << param.grid.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual void initTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::initTuneParam(param);
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
}
/** Sets default values for when tuning is disabled - this is guaranteed to work, but will be slow */
virtual void defaultTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) DslashCuda::defaultTuneParam(param);
else initTuneParam(param);
}
};
#else /** For pre-Fermi architectures */
class SharedDslashCuda : public DslashCuda {
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, QudaReconstructType reconstruct)
: DslashCuda(out, in, x, reconstruct) { }
virtual ~SharedDslashCuda() { }
};
#endif
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common input
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1),
dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class CloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
CloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~CloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(cloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1872ll : 1824ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x->V(), (float*)x->Norm(), a);
}
long long flops() const { return 1872ll * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
void setTwistParam(double &a, double &b, const double &kappa, const double &mu,
const int dagger, const QudaTwistGamma5Type twist) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
} else {
errorQuda("Twist type %d not defined\n", twist);
}
if (dagger) a *= -1.0;
}
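// Worked example (illustrative numbers only), kappa = 0.1, mu = 0.05, dagger = 0:
//   QUDA_TWIST_GAMMA5_DIRECT : a =  2*0.1*0.05 =  0.01,  b = 1.0
//   QUDA_TWIST_GAMMA5_INVERSE: a = -0.01,                b = 1/(1 + 0.01*0.01) ~ 0.9999
// and dagger = 1 flips the sign of a in either case.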
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
const int dagger;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
#endif
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1),
dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
strcat(key.aux,",TwistInvDslash");
break;
case QUDA_DEG_DSLASH_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
strcat(key.aux,",DslashTwist");
break;
case QUDA_NONDEG_DSLASH:
strcat(key.aux,",NdegDslash");
break;
}
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
case QUDA_NONDEG_DSLASH:
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistCloverDslashType dslashType;
const int dagger;
double a, b, c, d;
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2),
dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
strcat(key.aux,",CloverTwistInvDslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
strcat(key.aux,",DslashCloverTwist");
break;
}
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat>
class DomainWallDslashCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm;
const double a;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
return (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB()*dslashConstants.Ls + 96ll*bulk + 120ll*wall;
}
};
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm;
const double a;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB();
break;
case 1:
flops_Tmp = 96ll*bulk + 120ll*wall;
break;
case 2:
flops_Tmp = 144ll*dslashConstants.VolumeCB()*dslashConstants.Ls
+ 3ll*dslashConstants.Ls*(dslashConstants.Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
//Dslash class definition for Mobius Domain Wall Fermion
template <typename sFloat, typename gFloat>
class MDWFDslashPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const int dagger;
const double mferm, a;
double *b5, *c5;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
MDWFDslashPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
dagger(dagger), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~MDWFDslashPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(MDWFDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(MDWFDslash4pre, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(MDWFDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 3:
DSLASH(MDWFDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB();
break;
case 1:
flops_Tmp = 168ll*bulk + 72ll*wall;
break;
case 2:
flops_Tmp = 144ll*bulk + 72ll*wall;
break;
case 3:
flops_Tmp = 144ll*dslashConstants.VolumeCB()*dslashConstants.Ls
+ 3ll*dslashConstants.Ls*(dslashConstants.Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
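// Maps a CUDA vector type to its underlying real scalar type.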
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
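// Dslash for naive and improved staggered fermions; a null long-link pointer selects the naive operator.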
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
const phaseFloat *phase0, *phase1;
const int dagger;
const double a;
QudaDslashType type;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0, const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct), fat0(fat0), fat1(fat1), long0(long0), long1(long1), phase0(phase0), phase1(phase1),
dagger(dagger), a(a), type(long0 ? QUDA_ASQTAD_DSLASH : QUDA_STAGGERED_DSLASH)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if (type == QUDA_STAGGERED_DSLASH) {
STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), fat0, fat1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
} else {
IMPROVED_STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(),
fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
}
int Nface() { return type == QUDA_STAGGERED_DSLASH ? 2 : 6; }
long long flops() const {
long long flops;
if (type == QUDA_STAGGERED_DSLASH)
flops = (x ? 666ll : 654ll) * dslashConstants.VolumeCB();
else
flops = (x ? 1158ll : 1146ll) * dslashConstants.VolumeCB();
return flops;
}
};
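// Per-stream book-keeping for the dynamically scheduled halo exchange.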
int gatherCompleted[Nstream];
int previousDir[Nstream];
int commsCompleted[Nstream];
int dslashCompleted[Nstream];
int commDimTotal;
/**
* Initialize the arrays used for the dynamic scheduling.
*/
void inline initDslashCommsPattern() {
for (int i=0; i<Nstream-1; i++) {
#ifndef GPU_COMMS
gatherCompleted[i] = 0;
#else
gatherCompleted[i] = 1;
#endif
commsCompleted[i] = 0;
dslashCompleted[i] = 0;
}
gatherCompleted[Nstream-1] = 1;
commsCompleted[Nstream-1] = 1;
// We need to know which was the previous direction in which
// communication was issued, since we only query a given event /
// comms call after the previous one has successfully
// completed.
for (int i=3; i>=0; i--) {
if (dslashParam.commDim[i]) {
int prev = Nstream-1;
for (int j=3; j>i; j--) if (dslashParam.commDim[j]) prev = 2*j;
previousDir[2*i + 1] = prev;
previousDir[2*i + 0] = 2*i + 1; // always valid
}
}
// this tells us how many events / comms occurrences there are in
// total. Used for exiting the while loop
commDimTotal = 0;
for (int i=3; i>=0; i--) commDimTotal += dslashParam.commDim[i];
#ifndef GPU_COMMS
commDimTotal *= 4; // 2 from pipe length, 2 from direction
#else
commDimTotal *= 2; // 2 from pipe length, 2 from direction
#endif
}
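// Wrap a call f with start/stop timing under the given profile index.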
#define PROFILE(f, profile, idx) \
profile.Start(idx); \
f; \
profile.Stop(idx);
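// Multi-GPU dslash driver: pack the halo faces, overlap the interior kernel with the halo communication, then launch the boundary kernel for each dimension as its comms complete.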
void dslashCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
initDslashCommsPattern();
// Record the start of the dslash
PROFILE(cudaEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(face[it]->recvStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || getKernelPackT() || getTwistPack()))
{ pack = true; break; }
// Initialize pack from source spinor
if (inCloverInv == NULL) {
PROFILE(face[it]->pack(*inSpinor, 1-parity, dagger, streams, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face[it]->pack(*inSpinor, *inClover, *inCloverInv, 1-parity, dagger,
streams, twist_a, twist_b), profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(cudaEventRecord(packEnd[0], streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
cudaEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(cudaStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(face[it]->gather(*inSpinor, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(cudaEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
//CUresult event_test;
//event_test = cuEventQuery(gatherEnd[2*i+dir]);
PROFILE(cudaError_t event_test = cudaEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (cudaSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(face[it]->sendStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = face[it]->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face[it]->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(cudaEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
#ifdef PTHREADS
#include <pthread.h>
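// Helper structs and entry points for the optional pthreads path: one host thread posts the MPI receives while another launches the interior kernel.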
struct ReceiveParam
{
TimeProfile* profile;
int nFace;
int dagger;
};
void *issueMPIReceive(void* receiveParam)
{
ReceiveParam* param = static_cast<ReceiveParam*>(receiveParam);
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(inSpinor->recvStart(param->nFace, 2*i+dir, param->dagger), (*(param->profile)), QUDA_PROFILE_COMMS_START);
}
}
return NULL;
}
struct InteriorParam
{
TimeProfile* profile;
DslashCuda* dslash;
int current_device;
};
void* launchInteriorKernel(void* interiorParam)
{
InteriorParam* param = static_cast<InteriorParam*>(interiorParam);
cudaSetDevice(param->current_device); // set device in the new thread
PROFILE(param->dslash->apply(streams[Nstream-1]), (*(param->profile)), QUDA_PROFILE_DSLASH_KERNEL);
return NULL;
}
#endif
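// Variant of the multi-GPU dslash driver that drives the halo exchange through the spinor field's own comms methods (and optionally pthreads) rather than the face buffers.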
void dslashCuda2(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
// Record the start of the dslash if doing communication in T and not kernel packing
#ifndef PTHREADS
if (dslashParam.commDim[3] && !(getKernelPackT() || getTwistPack()))
#endif
{
PROFILE(cudaEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
inSpinor->allocateGhostBuffer(dslash.Nface()/2);
inSpinor->createComms(dslash.Nface()/2);
initDslashCommsPattern();
inSpinor->streamInit(streams);
#ifdef PTHREADS // create two new threads to issue MPI receives
// and launch the interior dslash kernel
const int packIndex = Nstream-2;
//const int packIndex = Nstream-1;
pthread_t receiveThread, interiorThread;
ReceiveParam receiveParam;
receiveParam.profile = &profile;
receiveParam.nFace = (dslash.Nface() >> 1);
receiveParam.dagger = dagger;
if(pthread_create(&receiveThread, NULL, issueMPIReceive, &receiveParam)){
errorQuda("pthread_create failed");
}
InteriorParam interiorParam;
interiorParam.dslash = &dslash;
interiorParam.profile = &profile;
cudaGetDevice(&(interiorParam.current_device)); // get the current device number
// PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
if(pthread_create(&interiorThread, NULL, launchInteriorKernel, &interiorParam)){
errorQuda("pthread_create failed");
}
#else // single CPU thread per MPI process
const int packIndex = Nstream-1;
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(inSpinor->recvStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
}
}
#endif
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || getKernelPackT() || getTwistPack()))
{ pack = true; break; }
// if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
#ifdef PTHREADS
if (pack){
PROFILE(cudaStreamWaitEvent(streams[packIndex], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
}
#endif
// Initialize pack from source spinor
if (inCloverInv == NULL) {
PROFILE(inSpinor->pack(dslash.Nface()/2, 1-parity, dagger, packIndex, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(inSpinor->pack(*inClover, *inCloverInv, dslash.Nface()/2, 1-parity, dagger, packIndex, twist_a),
profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(cudaEventRecord(packEnd[0], streams[packIndex]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
#ifndef GPU_COMMS
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
cudaEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(cudaStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(inSpinor->gather(dslash.Nface()/2, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(cudaEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif // GPU_COMMS
#endif // MULTI_GPU
#if (!defined MULTI_GPU) || (!defined PTHREADS)
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#endif
#ifdef MULTI_GPU
#ifdef PTHREADS
if(pthread_join(receiveThread, NULL)) errorQuda("pthread_join failed");
// if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
#endif
#ifdef GPU_COMMS
bool pack_event = false;
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
if ((i!=3 || getKernelPackT() || getTwistPack()) && !pack_event) {
cudaEventSynchronize(packEnd[0]);
pack_event = true;
} else {
cudaEventSynchronize(dslashStart);
}
for (int dir=1; dir>=0; dir--) {
PROFILE(inSpinor->sendStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
inSpinor->commsQuery(dslash.Nface()/2, 2*i+dir, dagger); // do a comms query to ensure MPI has begun
}
}
#endif
bool interiorLaunched = false;
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
#ifndef GPU_COMMS
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
PROFILE(cudaError_t event_test = cudaEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (cudaSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(inSpinor->sendStart(dslash.Nface()/2, 2*i+dir, dagger), profile, QUDA_PROFILE_COMMS_START);
}
}
#endif
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = inSpinor->commsQuery(dslash.Nface()/2, 2*i+dir, dagger),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
#ifndef GPU_COMMS
PROFILE(inSpinor->scatter(dslash.Nface()/2, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
#endif
}
}
} // dir=0,1
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
#ifndef GPU_COMMS
PROFILE(cudaEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
#ifdef PTHREADS
if(!interiorLaunched){
if(pthread_join(interiorThread, NULL)) errorQuda("pthread_join failed");
interiorLaunched = true;
}
#endif
// wait for scattering to finish and then launch dslash
PROFILE(cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
#endif
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
/**
Variation of multi-gpu dslash where the packing kernel writes
buffers directly to host memory
*/
void dslashZeroCopyCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
initDslashCommsPattern();
for(int i=3; i>=0; i--){
if(!dslashParam.commDim[i]) continue;
for(int dir=1; dir>=0; dir--){
PROFILE(face[it]->recvStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
setKernelPackT(true);
// Record the end of the packing
PROFILE(cudaEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
PROFILE(cudaStreamWaitEvent(streams[0], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize pack from source spinor
PROFILE(face[it]->pack(*inSpinor, 1-parity, dagger, streams, true, twist_a, twist_b),
profile, QUDA_PROFILE_PACK_KERNEL);
// Record the end of the packing
PROFILE(cudaEventRecord(packEnd[0], streams[0]),
profile, QUDA_PROFILE_EVENT_RECORD);
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
int doda=0;
while (doda++>=0) {
PROFILE(cudaError_t event_test = cudaEventQuery(packEnd[0]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (event_test == cudaSuccess) doda=-1;
}
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
PROFILE(face[it]->sendStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
int completeSum = 0;
commDimTotal /= 2; // pipe is shorter for the zero-copy variant
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]]) {
PROFILE(int comms_test = face[it]->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face[it]->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(cudaEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
it = (it^1);
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
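// Dslash driver with no inter-GPU communication: only the interior kernel is launched (used for operators that act purely within the local volume).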
void dslashCudaNC(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
profile.Stop(QUDA_PROFILE_TOTAL);
}
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
void cloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new CloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new CloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new CloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inClover = (FullClover*) clover;
inCloverInv = (FullClover*) cloverInv;
#ifdef GPU_TWISTED_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 2.*mu*kappa;
#endif
/*
#ifdef MULTI_GPU
if(type == QUDA_DEG_CLOVER_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
*/
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP, *cloverNormP, *cloverInvP, *cloverInvNormP;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, in, x, type, kappa, mu, epsilon, k, dagger);
}
// dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
/*
#ifdef MULTI_GPU
if(type == QUDA_DEG_CLOVER_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
*/
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
errorQuda("Twisted clover dslash has not been built");
#endif
}
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// An additional argument (DS_type) selects which kernel to apply.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
if(DS_type != 0)
dslashCudaNC(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
else
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
//-----------------------------------------------------
// Modification for 4D preconditioned Mobius DWF operator
// An additional argument (DS_type) selects which kernel to apply.
//
// pre-defined DS_type list
// 0 = MDWF dslash4
// 1 = MDWF dslash4pre
// 2 = MDWF dslash5
// 3 = MDWF dslash5inv
//-----------------------------------------------------
void MDWFDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
// currently only splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new MDWFDslashPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new MDWFDslashPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new MDWFDslashPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
if(DS_type != 0)
dslashCudaNC(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
else
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
void staggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity,
const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
dslashParam.sp_stride = in->Stride();
dslashParam.gauge_stride = gauge.Stride();
dslashParam.fat_link_max = gauge.LinkMax(); // May need to use this in the preconditioning step
// in the solver for the improved staggered action
for(int i=0;i<4;i++){
dslashParam.X[i] = in->X()[i];
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
dslashParam.X[0] *= 2; // because color spinor fields are defined on a half lattice
void *gauge0, *gauge1;
bindFatGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision()) {
errorQuda("Mixing precisions gauge=%d and spinor=%d not supported",
gauge.Precision(), in->Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)gauge0, (double2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)gauge0, (float2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)gauge0, (short2*)gauge1, 0, 0, 0, 0, gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindFatGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
void
improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.sp_stride = in->Stride();
dslashParam.parity = parity;
dslashParam.gauge_stride = fatGauge.Stride();
dslashParam.long_gauge_stride = longGauge.Stride();
dslashParam.fat_link_max = fatGauge.LinkMax();
for(int i=0;i<4;i++){
dslashParam.X[i] = in->X()[i];
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
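// color spinor fields are defined on a half (checkerboarded) lattice, so double X[0]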
dslashParam.X[0] *= 2;
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported"
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda2(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindFatGaugeTex(fatGauge);
unbindLongGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
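// Applies the clover matrix to a spinor field (no hopping term).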
template <typename sFloat, typename cFloat>
class CloverCuda : public Tunable {
private:
cudaColorSpinorField *out;
float *outNorm;
char *saveOut, *saveOutNorm;
const cFloat *clover;
const float *cloverNorm;
const cudaColorSpinorField *in;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return CLOVER_SHARED_FLOATS_PER_THREAD * reg_size;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
CloverCuda(cudaColorSpinorField *out, const cFloat *clover, const float *cloverNorm,
const cudaColorSpinorField *in)
: out(out), clover(clover), cloverNorm(cloverNorm), in(in)
{
bindSpinorTex<sFloat>(in);
}
virtual ~CloverCuda() { unbindSpinorTex<sFloat>(in); }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
cloverKernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name()); }
// Need to save the out field if it aliases the in field
void preTune() {
if (in == out) {
saveOut = new char[out->Bytes()];
cudaMemcpy(saveOut, out->V(), out->Bytes(), cudaMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), cudaMemcpyDeviceToHost);
}
}
}
// Restore if the in and out fields alias
void postTune() {
if (in == out) {
cudaMemcpy(out->V(), saveOut, out->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
cudaMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 504ll * dslashConstants.VolumeCB(); }
};
void cloverCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover clover,
const cudaColorSpinorField *in, const int parity) {
dslashParam.parity = parity;
dslashParam.threads = in->Volume();
#ifdef GPU_CLOVER_DIRAC
Tunable *clov = 0;
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(clover, parity, &cloverP, &cloverNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
clov = new CloverCuda<double2, double2>(out, (double2*)cloverP, (float*)cloverNormP, in);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
clov = new CloverCuda<float4, float4>(out, (float4*)cloverP, (float*)cloverNormP, in);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
clov = new CloverCuda<short4, short4>(out, (short4*)cloverP, (float*)cloverNormP, in);
}
clov->apply(0);
unbindCloverTex(clover);
checkCudaError();
delete clov;
#else
errorQuda("Clover dslash has not been built");
#endif
}
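// Applies the twisted-mass gamma_5 rotation (direct or inverse) to a spinor field.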
template <typename sFloat>
class TwistGamma5Cuda : public Tunable {
private:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type twist) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, twist);
else{//twist doublet
a = kappa, b = mu, c = epsilon;
}
}
virtual ~TwistGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), in->AuxString()); }
void apply(const cudaStream_t &stream)
{
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) {
twistGamma5Kernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a, b,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
} else {
twistGamma5Kernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a, b, c,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
cudaMemcpy(saveOut, out->V(), out->Bytes(), cudaMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), cudaMemcpyDeviceToHost);
}
}
void postTune() {
cudaMemcpy(out->V(), saveOut, out->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
cudaMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); }
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
// also handles the non-degenerate (doublet) twisted-mass case
void twistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const int dagger, const double &kappa, const double &mu, const double &epsilon, const QudaTwistGamma5Type twist)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
dslashParam.threads = in->Volume() / 2;
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
Tunable *twistGamma5 = 0;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
twistGamma5 = new TwistGamma5Cuda<double2>(out, in, kappa, mu, epsilon, dagger, twist);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<float4>(out, in, kappa, mu, epsilon, dagger, twist);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<short4>(out, in, kappa, mu, epsilon, dagger, twist);
}
twistGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete twistGamma5;
#else
errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
#include "dslash_core/tmc_gamma_core.h"
template <typename cFloat, typename sFloat>
class TwistCloverGamma5Cuda : public Tunable {
private:
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
QudaTwistGamma5Type twist;
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistCloverGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type tw,
cFloat *clov, const float *cN, cFloat *clovInv, const float *cN2) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
twist = tw;
clover = clov;
cNorm = cN;
cloverInv = clovInv;
cNrm2 = cN2;
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, tw);
// a = 2.*kappa*mu;
else{//twist doublet
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
}
virtual ~TwistCloverGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const {
return TuneKey(in->VolString(), typeid(*this).name(), in->AuxString());
}
void apply(const cudaStream_t &stream)
{
//A.S.: should this be GPU_TWISTED_CLOVER_DIRAC instead?
#if (defined GPU_TWISTED_CLOVER_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) { // Kernel idea: two spinor inputs (IN and clover-applied IN), one output (clover-applied IN + i*g5*IN)
if (twist == QUDA_TWIST_GAMMA5_DIRECT)
twistCloverGamma5Kernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam,
clover, cNorm, cloverInv, cNrm2);
else if (twist == QUDA_TWIST_GAMMA5_INVERSE)
twistCloverGamma5InvKernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam,
clover, cNorm, cloverInv, cNrm2);
} else {
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
cudaMemcpy(saveOut, out->V(), out->Bytes(), cudaMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), cudaMemcpyDeviceToHost);
}
}
void postTune() {
cudaMemcpy(out->V(), saveOut, out->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
cudaMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); } //TODO FIX THIS NUMBER!!!
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
void twistCloverGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in, const int dagger, const double &kappa, const double &mu,
const double &epsilon, const QudaTwistGamma5Type twist, const FullClover *clov, const FullClover *clovInv, const int parity)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
errorQuda("Twisted doublet not supported in twisted clover dslash");
#ifdef GPU_TWISTED_CLOVER_DIRAC
Tunable *tmClovGamma5 = 0;
void *clover, *cNorm, *cloverInv, *cNorm2;
QudaPrecision clover_prec = bindTwistedCloverTex(*clov, *clovInv, parity, &clover, &cNorm, &cloverInv, &cNorm2);
if (in->Precision() != clover_prec)
errorQuda("ERROR: Clover precision and spinor precision do not match\n");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
tmClovGamma5 = new TwistCloverGamma5Cuda<double2,double2>(out, in, kappa, mu, epsilon, dagger, twist, (double2 *) clover, (float *) cNorm, (double2 *) cloverInv, (float *) cNorm2);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
tmClovGamma5 = new TwistCloverGamma5Cuda<float4,float4>(out, in, kappa, mu, epsilon, dagger, twist, (float4 *) clover, (float *) cNorm, (float4 *) cloverInv, (float *) cNorm2);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
tmClovGamma5 = new TwistCloverGamma5Cuda<short4,short4>(out, in, kappa, mu, epsilon, dagger, twist, (short4 *) clover, (float *) cNorm, (short4 *) cloverInv, (float *) cNorm2);
}
tmClovGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete tmClovGamma5;
unbindTwistedCloverTex(*clov);
#else
errorQuda("Twisted clover dslash has not been built");
#endif // GPU_TWISTED_CLOVER_DIRAC
}
} // namespace quda
#include "misc_helpers.cu"
#if defined(GPU_FATLINK) || defined(GPU_GAUGE_FORCE) || defined(GPU_FERMION_FORCE) // || defined(GPU_UNITARIZE)
#include <force_common.h>
#endif
#ifdef GPU_FATLINK
#include "llfat_quda.cu"
#endif
#ifdef GPU_GAUGE_FORCE
#include "gauge_force_quda.cu"
#endif
#ifdef GPU_FERMION_FORCE
#include "fermion_force_quda.cu"
#endif
|
f9257a48f24e73dc2e2fda3cc4438a4965b339ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void glcm_calculation_90(int *A,int *glcm, const int nx, const int ny,int max){
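// Each thread handles one pixel (global index idx = iy*nx + ix). For the
// 90-degree direction the co-occurring pair is (A[idx], A[idx+nx]), i.e. the
// pixel one row below, so only pixels in the first nx-1 rows contribute; each
// pair atomically increments the flattened max-by-max co-occurrence matrix
// entry glcm[max*A[idx+nx] + A[idx]].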
int ix = threadIdx.x + blockIdx.x* blockDim.x;
int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =iy*nx+ix;
int i;
int k=0;
for(i=0;i<nx-1;i++){
if(idx>=i*nx && idx<((i+1) *nx)){
k=max*A[idx+nx]+A[idx];
atomicAdd(&glcm[k],1);
}
}
__syncthreads();
} | f9257a48f24e73dc2e2fda3cc4438a4965b339ff.cu | #include "includes.h"
__global__ void glcm_calculation_90(int *A,int *glcm, const int nx, const int ny,int max){
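// Each thread handles one pixel (global index idx = iy*nx + ix). For the
// 90-degree direction the co-occurring pair is (A[idx], A[idx+nx]), i.e. the
// pixel one row below, so only pixels in the first nx-1 rows contribute; each
// pair atomically increments the flattened max-by-max co-occurrence matrix
// entry glcm[max*A[idx+nx] + A[idx]].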
int ix = threadIdx.x + blockIdx.x* blockDim.x;
int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =iy*nx+ix;
int i;
int k=0;
for(i=0;i<nx-1;i++){
if(idx>=i*nx && idx<((i+1) *nx)){
k=max*A[idx+nx]+A[idx];
atomicAdd(&glcm[k],1);
}
}
__syncthreads();
} |
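/* Illustrative host-side launch (not part of the original file; d_img, d_glcm
   and maxGray are hypothetical names for the device buffers and gray-level count):
     dim3 block(16, 16);
     dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
     glcm_calculation_90<<<grid, block>>>(d_img, d_glcm, nx, ny, maxGray);
     cudaDeviceSynchronize();
*/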
33d6d47a7e2fa57ee6667cacae17397f4a465560.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define m(y,x) mapa[(y * cols) + x]
/* Constant definitions */
#define currentGPU 0 // The highest device number usually corresponds to the video output
#define MAX 50
typedef struct {
int y;
int x;
} Antena;
__global__ void gpu_init(int *mapad, int max, int size)
{
/* Required thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if (position<size) mapad[position] = max;
}
__global__ void gpu_actualizar(int *mapad, int rows, int cols, Antena antena, int size)
{
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if(position<size)
{
int x,y;
y=(int)position/cols;
x=position-y*cols;
int dist = abs(antena.x -x) + abs(antena.y - y);
int nuevadist = dist*dist;
if(nuevadist<mapad[position])
{
mapad[position] = nuevadist;
}
}
}
__global__ void gpu_reduce(int *c, int size)
{
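/* One pairwise max-reduction step: when size is even each thread keeps the
   larger of c[position] and c[position + size/2]; when size is odd it compares
   against the trailing element c[size-1]. The host-side reduce() wrapper calls
   this kernel repeatedly on a shrinking range so that the array maximum ends
   up in c[0]. */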
/* Required thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if(position<size){
if(size%2 != 0)
{
if(c[position]<c[size-1])
{
c[position]=c[size-1];
}
}else{
if(c[position]<c[position+size/2])
{
c[position]=c[position+size/2];
}
}
}
}
int reduce(int *maximo,int *c, int *v, int size,dim3 bd,dim3 gd)
{
int t=size;
while(t!=1){
hipLaunchKernelGGL(( gpu_reduce), dim3(gd),dim3(bd), 0, 0, c,t);
hipDeviceSynchronize();
if(size%2==0){
t=t/2;
}else{
size -= 1;
}
}
hipMemcpy(maximo,c,sizeof(int) * 1,hipMemcpyDeviceToHost);
return maximo[0];
}
int manhattan(Antena a, int y, int x){
int dist = abs(a.x -x) + abs(a.y - y);
return dist * dist;
}
int calcular_max(int * mapa, int rows, int cols){
int max = 0;
for(int i=0; i<rows; i++){
for(int j=0; j<cols; j++){
if(m(i,j)>max){
max = m(i,j);
}
} // j
} // i
return max;
}
Antena nueva_antena(int * mapa, int rows, int cols, int min){
for(int i=0; i<rows; i++){
for(int j=0; j<cols; j++){
if(m(i,j)==min){
Antena antena = {i,j};
return antena;
}
} // j
} // i
}
int main(int nargs, char ** vargs)
{
//
// 1. READ INPUT DATA
//
// Check the number of arguments
if(nargs < 7){
fprintf(stderr,"Uso: %s rows cols distMax nAntenas x0 y0 [x1 y1, ...]\n",vargs[0]);
return -1;
}
// Read the input arguments
int rows = atoi(vargs[1]);
int cols = atoi(vargs[2]);
int distMax = atoi(vargs[3]);
int nAntenas = atoi(vargs[4]);
int i,j;
Antena *antenas;
if ((antenas = (Antena *) calloc(rows * cols, sizeof(Antena)))==NULL){ //mapa
printf("error\n");
exit (-1);
}
// Read the antennas
for(int i=0; i<nAntenas; i++){
antenas[i].x = atoi(vargs[5+i*2]);
antenas[i].y = atoi(vargs[6+i*2]);
if(antenas[i].y<0 || antenas[i].y>=rows || antenas[i].x<0 || antenas[i].x>=cols ){
fprintf(stderr,"Antena #%d est fuera del mapa\n",i);
return -1;
}
}
//
// 2. INITIALIZATION
//
// Measure the time
/* Declaration and initialization of CPU (HOST) variables */
int *mapa, *maximo;
/* Select the GPU (DEVICE) to use */
int *mapad;
/* Memory allocation for CPU variables */
if ((mapa=(int *) calloc(rows * cols, sizeof(int)))==NULL){ //mapa
printf("error\n");
exit (-1);
}
if ((maximo=(int *) calloc(1, sizeof(int)))==NULL){
printf("error\n");
exit (-1);
}
/* Memory allocation for DEVICE variables (in GPU memory) */
hipMalloc( (void**) &mapad, sizeof(int) * (int) rows * cols);
/* Map initialization */
int size = rows * cols;
int tam = (int) ceil( ((float)(rows * cols))/size);
dim3 bloqdiminit(128,1);
dim3 griddiminit(tam,1);
hipLaunchKernelGGL(( gpu_init), dim3(griddiminit),dim3(bloqdiminit), 0, 0, mapad,INT_MAX,size);
hipDeviceSynchronize();
/* Copy data from DEVICE to HOST */
hipMemcpy(mapa,mapad,sizeof(int) * rows*cols,hipMemcpyDeviceToHost);
printf("matriz:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[i*5+j]);
}
printf("\n");
}
printf("fin de la matriz\n----\n");
// Place the initial antennas
for(int i=0; i<nAntenas; i++){
hipLaunchKernelGGL(( gpu_actualizar), dim3(griddiminit),dim3(bloqdiminit), 0, 0, mapad, rows, cols, antenas[i], size);
hipDeviceSynchronize();
printf("antena n: %d\n",i);
}
/* Copy data from DEVICE to HOST */
hipMemcpy(mapa,mapad,sizeof(int) * rows*cols,hipMemcpyDeviceToHost);
/* Launch the DEVICE function */
hipDeviceSynchronize();
printf("matriz:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz\n----\n");
// Antenna counter
int nuevas = 0;
while(1){
// compute the maximum
int max = reduce(maximo,mapad,mapa, rows * cols,bloqdiminit,griddiminit);
hipMemcpy(mapad,mapa,sizeof(int) * rows*cols,hipMemcpyHostToDevice);
//printf("max: %d\n",max);
// Exit if the maximum distance already meets the target
if (max <= distMax) break;
// Increment the counter
nuevas++;
// Compute the new antenna and update the map
Antena antena = nueva_antena(mapa, rows, cols, max);
hipLaunchKernelGGL(( gpu_actualizar), dim3(griddiminit),dim3(bloqdiminit), 0, 0, mapad, rows, cols, antena, size);
hipDeviceSynchronize();
hipMemcpy(mapa,mapad,sizeof(int) * rows*cols,hipMemcpyDeviceToHost);
/*printf("\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("\n");*/
}
hipDeviceSynchronize();
printf("-----\nmatriz final:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz final\n");
/* Free DEVICE memory */
hipFree(mapad);
/* Free HOST memory */
free(mapa);
/* Release the DEVICE threads */
hipDeviceReset();
} //main
| 33d6d47a7e2fa57ee6667cacae17397f4a465560.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define m(y,x) mapa[(y * cols) + x]
/* Constant definitions */
#define currentGPU 0 // The highest device number usually corresponds to the video output
#define MAX 50
typedef struct {
int y;
int x;
} Antena;
__global__ void gpu_init(int *mapad, int max, int size)
{
/* Required thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if (position<size) mapad[position] = max;
}
__global__ void gpu_actualizar(int *mapad, int rows, int cols, Antena antena, int size)
{
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if(position<size)
{
int x,y;
y=(int)position/cols;
x=position-y*cols;
int dist = abs(antena.x -x) + abs(antena.y - y);
int nuevadist = dist*dist;
if(nuevadist<mapad[position])
{
mapad[position] = nuevadist;
}
}
}
__global__ void gpu_reduce(int *c, int size)
{
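/* One pairwise max-reduction step: when size is even each thread keeps the
   larger of c[position] and c[position + size/2]; when size is odd it compares
   against the trailing element c[size-1]. The host-side reduce() wrapper calls
   this kernel repeatedly on a shrinking range so that the array maximum ends
   up in c[0]. */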
/* Required thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if(position<size){
if(size%2 != 0)
{
if(c[position]<c[size-1])
{
c[position]=c[size-1];
}
}else{
if(c[position]<c[position+size/2])
{
c[position]=c[position+size/2];
}
}
}
}
int reduce(int *maximo,int *c, int *v, int size,dim3 bd,dim3 gd)
{
int t=size;
while(t!=1){
gpu_reduce<<<gd,bd>>>(c,t);
cudaDeviceSynchronize();
if(size%2==0){
t=t/2;
}else{
size -= 1;
}
}
cudaMemcpy(maximo,c,sizeof(int) * 1,cudaMemcpyDeviceToHost);
return maximo[0];
}
int manhattan(Antena a, int y, int x){
int dist = abs(a.x -x) + abs(a.y - y);
return dist * dist;
}
int calcular_max(int * mapa, int rows, int cols){
int max = 0;
for(int i=0; i<rows; i++){
for(int j=0; j<cols; j++){
if(m(i,j)>max){
max = m(i,j);
}
} // j
} // i
return max;
}
Antena nueva_antena(int * mapa, int rows, int cols, int min){
for(int i=0; i<rows; i++){
for(int j=0; j<cols; j++){
if(m(i,j)==min){
Antena antena = {i,j};
return antena;
}
} // j
} // i
}
int main(int nargs, char ** vargs)
{
//
// 1. READ INPUT DATA
//
// Check the number of arguments
if(nargs < 7){
fprintf(stderr,"Uso: %s rows cols distMax nAntenas x0 y0 [x1 y1, ...]\n",vargs[0]);
return -1;
}
// Read the input arguments
int rows = atoi(vargs[1]);
int cols = atoi(vargs[2]);
int distMax = atoi(vargs[3]);
int nAntenas = atoi(vargs[4]);
int i,j;
Antena *antenas;
if ((antenas = (Antena *) calloc(rows * cols, sizeof(Antena)))==NULL){ //mapa
printf("error\n");
exit (-1);
}
// Read the antennas
for(int i=0; i<nAntenas; i++){
antenas[i].x = atoi(vargs[5+i*2]);
antenas[i].y = atoi(vargs[6+i*2]);
if(antenas[i].y<0 || antenas[i].y>=rows || antenas[i].x<0 || antenas[i].x>=cols ){
fprintf(stderr,"Antena #%d está fuera del mapa\n",i);
return -1;
}
}
//
// 2. INITIALIZATION
//
// Measure the time
/* Declaration and initialization of CPU (HOST) variables */
int *mapa, *maximo;
/* Select the GPU (DEVICE) to use */
int *mapad;
/* Memory allocation for CPU variables */
if ((mapa=(int *) calloc(rows * cols, sizeof(int)))==NULL){ //mapa
printf("error\n");
exit (-1);
}
if ((maximo=(int *) calloc(1, sizeof(int)))==NULL){
printf("error\n");
exit (-1);
}
/* Memory allocation for DEVICE variables (in GPU memory) */
cudaMalloc( (void**) &mapad, sizeof(int) * (int) rows * cols);
/* Map initialization */
int size = rows * cols;
int tam = (int) ceil( ((float)(rows * cols))/size);
dim3 bloqdiminit(128,1);
dim3 griddiminit(tam,1);
gpu_init<<<griddiminit,bloqdiminit>>>(mapad,INT_MAX,size);
cudaDeviceSynchronize();
/* Copy data from DEVICE to HOST */
cudaMemcpy(mapa,mapad,sizeof(int) * rows*cols,cudaMemcpyDeviceToHost);
printf("matriz:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[i*5+j]);
}
printf("\n");
}
printf("fin de la matriz\n----\n");
// Place the initial antennas
for(int i=0; i<nAntenas; i++){
gpu_actualizar<<<griddiminit,bloqdiminit>>>(mapad, rows, cols, antenas[i], size);
cudaDeviceSynchronize();
printf("antena nº: %d\n",i);
}
/* Copy data from DEVICE to HOST */
cudaMemcpy(mapa,mapad,sizeof(int) * rows*cols,cudaMemcpyDeviceToHost);
/* Launch the DEVICE function */
cudaDeviceSynchronize();
printf("matriz:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz\n----\n");
// Antenna counter
int nuevas = 0;
while(1){
// compute the maximum
int max = reduce(maximo,mapad,mapa, rows * cols,bloqdiminit,griddiminit);
cudaMemcpy(mapad,mapa,sizeof(int) * rows*cols,cudaMemcpyHostToDevice);
//printf("max: %d\n",max);
// Exit if the maximum distance already meets the target
if (max <= distMax) break;
// Increment the counter
nuevas++;
// Compute the new antenna and update the map
Antena antena = nueva_antena(mapa, rows, cols, max);
gpu_actualizar<<<griddiminit,bloqdiminit>>>(mapad, rows, cols, antena, size);
cudaDeviceSynchronize();
cudaMemcpy(mapa,mapad,sizeof(int) * rows*cols,cudaMemcpyDeviceToHost);
/*printf("\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("\n");*/
}
cudaDeviceSynchronize();
printf("-----\nmatriz final:\n");
for (i = 0; i<rows;i++){
for (j=0;j<cols;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz final\n");
/* Free DEVICE memory */
cudaFree(mapad);
/* Free HOST memory */
free(mapa);
/* Release the DEVICE threads */
cudaDeviceReset();
} //main
|
ceb6a8700d78c2460b4b2213373ce27be4c924b6.hip | // !!! This is a file automatically generated by hipify!!!
//
// pole.cu
// pole
//
// Created by Dwight Bell on 8/18/10.
// Copyright dbelll 2010. All rights reserved.
//
#include <hip/hip_runtime.h>
#include "cutil.h"
#include "cuda_rand.cu"
#include "pole.h"
#include "cuda_utils.h"
#include "main.h"
#include "cuda_row_reduction.h"
// paramaters are stored in constant memory on the device
__constant__ unsigned dc_agents;
__constant__ unsigned dc_agent_group_size;
__constant__ unsigned dc_time_steps;
__constant__ float dc_initial_sharing_wgt;
__constant__ float dc_epsilon;
__constant__ float dc_gamma;
__constant__ float dc_lambda;
__constant__ float dc_gammaXlambda;
__constant__ float dc_alpha;
__constant__ unsigned dc_num_actions;
__constant__ unsigned dc_num_actionsXagents;
__constant__ unsigned dc_num_features;
__constant__ unsigned dc_num_featuresXactionsXagents;
__constant__ unsigned dc_test_interval;
__constant__ unsigned dc_test_reps;
// fixed pointers are stored in constant memory on the device
__constant__ unsigned *dc_seeds;
__constant__ float *dc_theta;
__constant__ float *dc_theta_bias;
__constant__ float *dc_e;
__constant__ float *dc_wgt;
__constant__ float *dc_s;
__constant__ float *dc_Q;
__constant__ unsigned *dc_action;
static AGENT_DATA *last_CPU_agent_dump;
// device pointers are stored here so they can be freed prior to exit
static unsigned *d_seeds;
static float *d_theta;
static float *d_theta_bias;
static float *d_e;
static float *d_wgt;
static float *d_s;
static float *d_Q;
static unsigned *d_action;
// copy parameter values to constant memory on device
void set_constant_params(PARAMS p)
{
hipMemcpyToSymbol("dc_agents", &p.agents, sizeof(unsigned));
hipMemcpyToSymbol("dc_agent_group_size", &p.agent_group_size, sizeof(unsigned));
hipMemcpyToSymbol("dc_time_steps", &p.time_steps, sizeof(unsigned));
hipMemcpyToSymbol("dc_initial_sharing_wgt", &p.initial_sharing_wgt, sizeof(float));
hipMemcpyToSymbol("dc_epsilon", &p.epsilon, sizeof(float));
hipMemcpyToSymbol("dc_gamma", &p.gamma, sizeof(float));
hipMemcpyToSymbol("dc_lambda", &p.lambda, sizeof(float));
float gammaXlambda = p.gamma * p.lambda;
hipMemcpyToSymbol("dc_gammaXlambda", &gammaXlambda, sizeof(float));
hipMemcpyToSymbol("dc_alpha", &p.alpha, sizeof(float));
hipMemcpyToSymbol("dc_num_actions", &p.num_actions, sizeof(unsigned));
unsigned num_actionsXagents = p.num_actions * p.agents;
hipMemcpyToSymbol("dc_num_actionsXagents", &num_actionsXagents, sizeof(unsigned));
hipMemcpyToSymbol("dc_num_features", &p.num_features, sizeof(unsigned));
unsigned num_featuresXactionsXagents = p.num_features * p.num_actions * p.agents;
hipMemcpyToSymbol("dc_num_featuresXactionsXagents", &num_featuresXactionsXagents, sizeof(unsigned));
hipMemcpyToSymbol("dc_test_interval", &p.test_interval, sizeof(unsigned));
hipMemcpyToSymbol("dc_test_reps", &p.test_reps, sizeof(unsigned));
}
/*
Procedures for setting up and running the pole balancing experiments on CPU and GPU
*/
static PARAMS _p;
static unsigned g_seeds[4] = {2784565659u, 1491908209u, 3415062841u, 3293636241u};
#pragma mark CPU & GPU
// random number in an interval from -max to +max using random uniform distribution
__host__ __device__ float random_interval(unsigned *seeds, unsigned stride, float max)
{
float r = (-max) + 2 * max * RandUniform(seeds, stride);
return r;
}
// randomize the state
__host__ void randomize_state(float *s, unsigned *seeds, unsigned stride)
{
s[0] = random_interval(seeds, stride, ANGLE_MAX);
s[stride] = random_interval(seeds, stride, ANGLE_VEL_MAX/4.0f);
s[2*stride] = random_interval(seeds, stride, X_MAX);
s[3*stride] = random_interval(seeds, stride, X_VEL_MAX/4.0f);
}
__device__ void randomize_stateGPU(float *s, unsigned *seeds)
{
s[0] = random_interval(seeds, dc_agents, ANGLE_MAX);
s[BLOCK_SIZE] = random_interval(seeds, dc_agents, ANGLE_VEL_MAX/4.0f);
s[2*BLOCK_SIZE] = random_interval(seeds, dc_agents, X_MAX);
s[3*BLOCK_SIZE] = random_interval(seeds, dc_agents, X_VEL_MAX/4.0f);
}
// reset eligibility traces to 0.0f
__host__ void reset_trace(float *e, unsigned num_features, unsigned num_actions,
unsigned stride)
{
for (int f = 0; f < num_features; f++) {
for (int a = 0; a < num_actions; a++) {
e[(a + f * num_actions) * stride] = 0.0f;
}
}
}
__device__ void reset_traceGPU(float *e)
{
for (int f = 0; f < dc_num_featuresXactionsXagents; f += dc_num_actionsXagents) {
for (int a = 0; a < dc_num_actionsXagents; a += dc_agents) {
e[a + f] = 0.0f;
}
}
}
__device__ __host__ unsigned terminal_state(float *s, unsigned stride)
{
float s2 = s[2*stride];
return (s2 < X_MIN) || (s2 > X_MAX) || (s[0] < ANGLE_MIN) || (s[0] > ANGLE_MAX);
}
// take an action from the current state, s, returning the reward and saving the new state in s_prime
__device__ __host__ float take_action(unsigned a, float *s, float *s_prime, unsigned stride)
{
// formulas are from: Brownlee. The pole balancing problem: a benchmark control theory
// problem. hdl.handle.net (2005)
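// The code below implements the standard cart-pole equations of motion:
//   ang_accel = [ GRAV*sin(a) + cos(a)*(-F - POLE_MASS*POLE_LENGTH*ang_vel^2*sin(a)) / (CART_MASS+POLE_MASS) ]
//               / [ POLE_LENGTH*(4/3 - POLE_MASS*cos(a)^2 / (CART_MASS+POLE_MASS)) ]
//   x_accel   = [ F + POLE_MASS*POLE_LENGTH*(ang_vel^2*sin(a) - ang_accel*cos(a)) ] / (CART_MASS+POLE_MASS)
// and the state is advanced by Euler integration with time step TAU.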
// determine force from the action
float F = a ? FORCE : -FORCE;
float ang = s[0];
float ang_vel = s[stride];
float cos_a = cos(ang);
float sin_a = sin(ang);
// calculate angular acceleration
float ang_accel = GRAV * sin_a;
ang_accel += cos_a * (-F - POLE_MASS * POLE_LENGTH * ang_vel * ang_vel * sin_a) /
(CART_MASS + POLE_MASS);
ang_accel /= POLE_LENGTH * (4.0f/3.0f - POLE_MASS * cos_a * cos_a / (CART_MASS + POLE_MASS));
float x = s[2*stride];
float x_vel = s[3*stride];
// calculate x acceleration
float x_accel = F + POLE_MASS * POLE_LENGTH * (ang_vel * ang_vel * sin_a - ang_accel * cos_a);
x_accel /= (CART_MASS + POLE_MASS);
// update ang, ang_vel and x, x_vel
s_prime[0] = ang + TAU * ang_vel;
s_prime[stride] = ang_vel + TAU * ang_accel;
s_prime[2*stride] = x + TAU * x_vel;
s_prime[3*stride] = x_vel + TAU * x_accel;
// determine the reward
float reward = terminal_state(s_prime, stride) ? REWARD_FAIL : REWARD_NON_FAIL;
return reward;
}
// Calculate which feature division the state value falls into, based on the min, max,
// and number of divisions.
__device__ __host__ unsigned feature_val_for_state_val(float s, float minv, float maxv,
unsigned div)
{
return (unsigned)max(0.0f, min(((float)(div)-1.0f), ((s-minv)/(maxv-minv) * (float)div)));
}
// Determine which feature corresponds to the given state
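// The four discretized state variables are combined in a mixed-radix encoding:
//   feature = angle_bin + ANGLE_DIV*(angle_vel_bin + ANGLE_VEL_DIV*(x_bin + X_DIV*x_vel_bin)),
// so exactly one feature index is active for any given state.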
__device__ __host__ unsigned feature_for_state(float *s, unsigned stride)
{
unsigned feature = feature_val_for_state_val(s[0], ANGLE_MIN, ANGLE_MAX, ANGLE_DIV);
feature += (ANGLE_DIV) *
feature_val_for_state_val(s[stride], ANGLE_VEL_MIN, ANGLE_VEL_MAX, ANGLE_VEL_DIV);
feature += (ANGLE_DIV * ANGLE_VEL_DIV) *
feature_val_for_state_val(s[2 * stride], X_MIN, X_MAX, X_DIV);
feature += (ANGLE_DIV * ANGLE_VEL_DIV * X_DIV) *
feature_val_for_state_val(s[3 * stride], X_VEL_MIN, X_VEL_MAX, X_VEL_DIV);
return feature;
}
// Pack the division index of each state variable for a feature into one value (one hex-friendly group per variable, used in debug dumps)
__device__ __host__ unsigned divs_for_feature(unsigned feature)
{
unsigned divs = feature % ANGLE_DIV;
feature /= ANGLE_DIV;
divs += 16 * (feature % ANGLE_VEL_DIV);
feature /= ANGLE_VEL_DIV;
divs += 256 * (feature % X_DIV);
feature /= X_DIV;
divs += 4096 * feature;
return divs;
}
// lookup the Q value for an action from a state
// can also be used to lookup theta_bias values
__host__ float calc_Q(float *s, unsigned a, float *theta, unsigned stride, unsigned num_actions)
{
// only one feature corresponds with any given state
unsigned feature = feature_for_state(s, stride);
float Q = theta[(a + feature * num_actions) * stride];
return Q;
}
__device__ float calc_QGPU(float *s, unsigned a, float *theta, unsigned feature)
{
// only one feature corresponds with any given state
float Q = theta[(a + feature * NUM_ACTIONS) * dc_agents];
return Q;
}
__host__ void update_stored_Q(float *Q, float *s, float *theta, unsigned stride, unsigned num_actions)
{
for (int a = 0; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
}
}
__device__ void update_stored_QGPU(float *Q, float *s, float *theta, unsigned feature)
{
for (int a = 0; a < NUM_ACTIONS; a++) {
Q[a * BLOCK_SIZE] = calc_QGPU(s, a, theta, feature);
}
}
// Calculate the Q value for each action from the given state, storing the values in Q
// Return the action with the highest Q value
__host__ unsigned best_action(float *s, float *theta, float *Q, unsigned stride, unsigned num_actions)
{
// calculate the Q value for each action
Q[0] = calc_Q(s, 0, theta, stride, num_actions);
unsigned best_action = 0;
float bestQ = Q[0];
for (int a = 1; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
if (Q[a * stride] > bestQ) {
bestQ = Q[a * stride];
best_action = a;
}
}
return best_action;
}
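// Same as best_action(), except the per-feature theta_bias is added to each Q value
// when comparing actions; the Q array itself is stored without the bias.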
__host__ unsigned best_action_biased(float *s, float *theta, float *theta_bias, float *Q, unsigned stride, unsigned num_actions)
{
// calculate the Q value for each action
Q[0] = calc_Q(s, 0, theta, stride, num_actions);
float bias = calc_Q(s, 0, theta_bias, stride, num_actions);
unsigned best_action = 0;
float bestQ_biased = Q[0] + bias;
#ifdef LOG_BIAS
printf("Q[0]=%7.4f, bias = %7.4f ...", Q[0], bias);
#endif
for (int a = 1; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
bias = calc_Q(s, a, theta_bias, stride, num_actions);
#ifdef LOG_BIAS
printf("Q[%d]=%7.4f, bias = %7.4f ...", a, Q[a*stride], bias);
#endif
if ((Q[a * stride]+bias) > bestQ_biased) {
bestQ_biased = (Q[a * stride]+bias);
best_action = a;
#ifdef LOG_BIAS
if (Q[0] > Q[a*stride]) {
printf("<----------- bias applied !!!!");
}
#endif
}
#ifdef LOG_BIAS
else {
if (Q[0] < Q[a*stride]) {
printf("<----------- bias applied !!!!");
}
}
#endif
}
#ifdef LOG_BIAS
printf("best action is %d\n", best_action);
#endif
return best_action;
}
__device__ unsigned best_actionGPU(float *s, float *theta, float *Q, unsigned feature)
{
// calculate the Q value for each action
Q[0] = calc_QGPU(s, 0, theta, feature);
unsigned best_action = 0;
float bestQ = Q[0];
unsigned index = BLOCK_SIZE;
for (int a = 1; a < NUM_ACTIONS; a++, index += BLOCK_SIZE) {
Q[index] = calc_QGPU(s, a, theta, feature);
if (Q[index] > bestQ) {
bestQ = Q[index];
best_action = a;
}
}
return best_action;
}
__device__ unsigned best_action_biasedGPU(float *s, float *theta, float *theta_bias, float *Q, unsigned feature)
{
// calculate the Q value for each action
Q[0] = calc_QGPU(s, 0, theta, feature);
float bias = calc_QGPU(s, 0, theta_bias, feature);
unsigned best_action = 0;
float bestQ_biased = Q[0] + bias;
unsigned index = BLOCK_SIZE;
for (int a = 1; a < NUM_ACTIONS; a++, index += BLOCK_SIZE) {
Q[index] = calc_QGPU(s, a, theta, feature);
bias = calc_QGPU(s, a, theta_bias, feature);
if ((Q[index]+bias) > bestQ_biased) {
bestQ_biased = (Q[index]+bias);
best_action = a;
}
}
return best_action;
}
// choose action from current state, storing Q values for each possible action in Q
__host__ unsigned choose_action(float *s, float *theta, float epsilon, unsigned stride,
float *Q, unsigned num_actions, unsigned *seeds)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action(s, theta, Q, stride, num_actions);
if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){
// choose random action
float r = RandUniform(seeds, stride);
a = r * num_actions;
}
return a;
}
// choose action from current state, storing Q values for each possible action in Q
__host__ unsigned choose_action_biased(float *s, float *theta, float *theta_bias, float epsilon, unsigned stride, float *Q, unsigned num_actions, unsigned *seeds)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action_biased(s, theta, theta_bias, Q, stride, num_actions);
if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){
// choose random action
float r = RandUniform(seeds, stride);
a = r * num_actions;
}
return a;
}
__device__ unsigned choose_actionGPU(float *s, float *theta, float *Q, unsigned *seeds, unsigned feature)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_actionGPU(s, theta, Q, feature);
if (dc_epsilon > 0.0f && RandUniform(seeds, dc_agents) < dc_epsilon){
// choose random action
float r = RandUniform(seeds, dc_agents);
a = r * NUM_ACTIONS;
}
return a;
}
__device__ unsigned choose_action_biasedGPU(float *s, float *theta, float *theta_bias, float *Q, unsigned *seeds, unsigned feature)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action_biasedGPU(s, theta, theta_bias, Q, feature);
if (dc_epsilon > 0.0f && RandUniform(seeds, dc_agents) < dc_epsilon){
// choose random action
float r = RandUniform(seeds, dc_agents);
a = r * NUM_ACTIONS;
}
return a;
}
// Update eligibility traces based on action and state
__host__ void update_trace(unsigned action, float *s, float *e, unsigned num_features,
unsigned num_actions, unsigned stride)
{
unsigned feature = feature_for_state(s, stride);
float gl = _p.gamma * _p.lambda;
for (int f = 0; f < num_features; f++) {
for (int a = 0; a < num_actions; a++) {
unsigned index = (a + f * num_actions) * stride;
// Replacing trace with optional block
if (f == feature) {
// set to 1.0 for action selected from current state,
// set to 0.0 for actions not taken from current state
e[index] = (a == action) ? 1.0f : 0.0f;
}else {
// decay all other values
e[index] *= gl;
}
}
}
}
__device__ void update_traceGPU(unsigned action, float *s, float *e, unsigned feature)
{
unsigned ff = feature * dc_num_actionsXagents;
unsigned aa = action * dc_agents;
for (unsigned f = 0; f < dc_num_featuresXactionsXagents; f += dc_num_actionsXagents) {
for (unsigned a = 0; a < dc_num_actionsXagents; a += dc_agents) {
unsigned index = a + f;
// Replacing trace with optional block
if (f == ff) {
// set to 1.0 for action selected from current state,
// set to 0.0 for actions not taken from current state
e[index] = (a == aa) ? 1.0f : 0.0f;
}else{
// decay all other values
e[index] *= dc_gammaXlambda;
}
}
}
}
// Update theta values for one agent
// theta = theta + alpha * delta * eligibility trace
__host__ void update_thetas(float *theta, float *e, float *wgt, float alpha, float delta, unsigned num_features, unsigned stride, unsigned num_actions)
{
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf("updating thetas for alpha = %9.6f, delta = %9.6f\n", alpha, delta);
#endif
for (int fa = 0; fa < num_features * num_actions * stride; fa += stride) {
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf(" feature-action %5d(%4x) %3d with trace %9.6f changed from %9.6f", (fa/num_actions), divs_for_feature(fa/num_actions), (fa%num_actions), e[fa*stride], theta[fa*stride]);
#endif
theta[fa] += alpha * delta * e[fa];
wgt[fa] += alpha * e[fa];
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf(" to %9.6f\n", theta[fa*stride]);
#endif
}
}
__device__ void update_thetasGPU(float *theta, float *e, float *wgt, float delta)
{
float ad = dc_alpha * delta;
for (int fa = 0; fa < dc_num_featuresXactionsXagents; fa += dc_agents) {
theta[fa] += ad * e[fa];
wgt[fa] += dc_alpha * e[fa];
}
}
#pragma mark -
#pragma mark CPU
void set_params(PARAMS p){ _p = p;}
void dump_agent(AGENT_DATA *ag, unsigned agent)
{
printf("[agent %d]: ", agent);
printf(" seeds = %u, %u, %u, %u\n", ag->seeds[agent], ag->seeds[agent + _p.agents],
ag->seeds[agent + 2*_p.agents], ag->seeds[agent + 3*_p.agents]);
#ifdef AGENT_DUMP_INCLUDE_THETA_E
printf("FEATURE ACTION THETA E WGT BIAS\n");
for (int f = 0; f < _p.num_features; f++) {
for (int action = 0; action < _p.num_actions; action++) {
printf("%7d %4x %7d %9.4f %9.4f %9.2f %9.4f\n", f, divs_for_feature(f), action,
ag->theta[agent + (action + f * _p.num_actions) * _p.agents],
ag->e[agent + (action + f * _p.num_actions) * _p.agents],
ag->wgt[agent + (action + f * _p.num_actions) * _p.agents],
ag->theta_bias[agent + (action + f * _p.num_actions) * _p.agents]);
}
}
#endif
printf(" angle angleV x xV Q0 Q1 feature\n");
unsigned feature = feature_for_state(ag->s + agent, _p.agents);
printf("%9.6f %9.6f %9.6f %9.6f %9.6f %9.6f %7d(%4x)\n", ag->s[agent], ag->s[agent + _p.agents], ag->s[agent + 2*_p.agents], ag->s[agent + 3*_p.agents], ag->Q[agent], ag->Q[agent + _p.agents],
feature, divs_for_feature(feature));
printf("ACTION Q-value\n");
for (int action = 0; action < _p.num_actions; action++) {
(action == ag->action[agent]) ? printf("-->") : printf(" ");
printf("%3d %9.6f\n", action, ag->Q[agent + action * _p.agents]);
}
printf("\n");
}
void dump_agents(const char *str, AGENT_DATA *ag)
{
last_CPU_agent_dump = ag;
printf("%s\n", str);
for (int agent = 0; agent < _p.agents; agent++) {
dump_agent(ag, agent);
}
}
void dump_one_agent(const char *str, AGENT_DATA *ag)
{
printf("%s\n", str);
dump_agent(ag, 0);
}
// generate random seeds for the specified number of agents
unsigned *create_seeds(unsigned num_agents)
{
unsigned *seeds = (unsigned *)malloc(num_agents * 4 * sizeof(unsigned));
for (int i = 0; i < num_agents * 4; i++) {
seeds[i] = RandUniformui(g_seeds, 1);
}
return seeds;
}
// create theta values set initially to random values between theta_min and theta_max
float *create_theta(unsigned num_agents, unsigned num_features, unsigned num_actions, float theta_min, float theta_max)
{
#ifdef VERBOSE
printf("create_theta for %d agents and %d features\n", num_agents, num_features);
#endif
float *theta = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
theta[i] = (theta_max - theta_min) * RandUniform(g_seeds, 1) + theta_min;
}
return theta;
}
// create theta_bias amounts set initially to random values between -THETA_BIAS_MAX and +THETA_BIAS_MAX
float *create_theta_bias(unsigned num_agents, unsigned num_features, unsigned num_actions, float theta_bias_max)
{
#ifdef VERBOSE
printf("create_theta_bias for %d agents and %d features\n", num_agents, num_features);
#endif
float *bias = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int a = 0; a < num_agents; a++) {
for (int fa = 0; fa < num_features * num_actions; fa++) {
if (theta_bias_max > 0.0f) {
bias[fa * num_agents + a] = random_interval(g_seeds, 1, theta_bias_max);
}else {
bias[fa * num_agents + a] = 0.0f;
}
}
}
return bias;
}
// initialize eligibility traces to 0.0f
float *create_e(unsigned num_agents, unsigned num_features, unsigned num_actions)
{
#ifdef VERBOSE
printf("create_e for %d agents and %d features and %d actions\n", num_agents, num_features, num_actions);
#endif
float *e = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
e[i] = 0.0f;
}
return e;
}
// initial wgt's set to initial_sharing_wgt
float *create_wgt(unsigned num_agents, unsigned num_features, unsigned num_actions, float initial_sharing_wgt)
{
#ifdef VERBOSE
printf("create_wgt for %d agents and %d features and %d actions\n", num_agents, num_features, num_actions);
#endif
float *wgt = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
wgt[i] = initial_sharing_wgt;
}
return wgt;
}
// initial random states
float *create_states(unsigned num_agents, unsigned *seeds)
{
float *states = (float *)malloc(num_agents * _p.state_size * sizeof(float));
for (int i = 0; i < num_agents; i++) {
randomize_state(states + i, seeds + i, num_agents);
}
return states;
}
RESULTS *initialize_results()
{
#ifdef VERBOSE
printf("initializing result arrays...\n");
#endif
RESULTS *r = (RESULTS *)malloc(sizeof(RESULTS));
r->avg_fail = (float *)malloc((_p.time_steps / _p.test_interval) * sizeof(float));
return r;
}
void free_results(RESULTS *r)
{
#ifdef VERBOSE
printf("freeing result arrays...\n");
#endif
if (r) {
if (r->avg_fail) free(r->avg_fail);
free(r);
}
}
void display_results(const char *str, RESULTS *r)
{
printf("%s \n", str);
printf(" TEST Avg Episode\n");
for (int i = 0; i < _p.num_tests; i++) {
printf(" [%4d]%9.0f\n", i, r->avg_fail[i]);
}
}
unsigned *create_actions(unsigned num_agents, unsigned num_actions)
{
unsigned *actions = (unsigned *)malloc(num_agents * num_actions * sizeof(unsigned));
for (int i = 0; i < num_agents * num_actions; i++) {
actions[i] = num_actions; // not possible action
}
return actions;
}
// Initialize agents on the CPU. Some values will be re-used for GPU agents
AGENT_DATA *initialize_agentsCPU()
{
#ifdef VERBOSE
printf("initializing agents on CPU...\n");
#endif
AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
ag->seeds = create_seeds(_p.agents);
ag->theta = create_theta(_p.agents, _p.num_features, _p.num_actions, _p.initial_theta_min, _p.initial_theta_max);
ag->theta_bias = create_theta_bias(_p.agents, _p.num_features, _p.num_actions, _p.theta_bias_max);
ag->e = create_e(_p.agents, _p.num_features, _p.num_actions);
ag->wgt = create_wgt(_p.agents, _p.num_features, _p.num_actions, _p.initial_sharing_wgt);
ag->s = create_states(_p.agents, ag->seeds);
ag->Q = (float *)malloc(_p.agents * _p.num_actions * sizeof(float));
ag->action = create_actions(_p.agents, _p.num_actions);
return ag;
}
void dump_state(float *s, unsigned stride)
{
printf("(%9.6f,%9.6f,%9.6f,%9.6f)[%d]\n", s[0], s[stride], s[2*stride], s[3*stride],
feature_for_state(s, stride));
}
// run tests for all agents and return the average failures
float run_test(AGENT_DATA *ag)
{
float total_time = 0.0f;
// initialize all agent states
for (int agent = 0; agent < _p.agents; agent++) {
// save agent state prior to testing
float s0 = ag->s[agent];
float s1 = ag->s[agent + _p.agents];
float s2 = ag->s[agent + 2*_p.agents];
float s3 = ag->s[agent + 3*_p.agents];
unsigned act = ag->action[agent];
float Q0 = ag->Q[agent];
float Q1 = ag->Q[agent + _p.agents];
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = best_action(ag->s + agent, ag->theta + agent, ag->Q + agent, _p.agents, _p.num_actions);
// run the test for up to the specified number of reps or first failure
int t;
for (t = 0; t < _p.test_reps; t++) {
take_action(ag->action[agent], ag->s+agent, ag->s+agent, _p.agents);
if (terminal_state(ag->s + agent, _p.agents)){
break;
}
// choose best action
ag->action[agent] = best_action(ag->s + agent, ag->theta + agent, ag->Q + agent, _p.agents, _p.num_actions);
}
total_time += t;
// restore agent state
ag->s[agent] = s0;
ag->s[agent + _p.agents] = s1;
ag->s[agent + 2*_p.agents] = s2;
ag->s[agent + 3*_p.agents] = s3;
ag->action[agent] = act;
ag->Q[agent] = Q0;
ag->Q[agent + _p.agents] = Q1;
}
return total_time / (float)_p.agents;
}
void clear_traces(AGENT_DATA *ag)
{
for (int i = 0; i < _p.agents * _p.num_features * _p.num_actions; i++) {
ag->e[i] = 0.0f;
}
}
void randomize_all_states(AGENT_DATA *ag)
{
// randomize the state for all agents, preparing for a new test session
for (int agent = 0; agent < _p.agents; agent++) {
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = choose_action(ag->s + agent, ag->theta + agent, _p.epsilon, _p.agents,
ag->Q + agent, _p.num_actions, ag->seeds + agent);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
void randomize_all_states_biased(AGENT_DATA *ag)
{
// randomize the state for all agents, preparing for a new test session
for (int agent = 0; agent < _p.agents; agent++) {
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = choose_action_biased(ag->s + agent, ag->theta + agent, ag->theta_bias + agent, _p.epsilon, _p.agents, ag->Q + agent, _p.num_actions, ag->seeds + agent);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
void learning_session(AGENT_DATA *ag)
{
// run learning session for all agents for one chunk of time
for (int agent = 0; agent < _p.agents; agent++) {
// loop over the time steps in the chunk
for (int t = 0; t < _p.chunk_interval; t++) {
float reward = take_action(ag->action[agent], ag->s + agent, ag->s + agent, _p.agents);
unsigned fail = terminal_state(ag->s + agent, _p.agents);
if (fail) randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
float Q_a = ag->Q[agent + ag->action[agent] * _p.agents];
ag->action[agent] = choose_action_biased(ag->s + agent, ag->theta + agent, ag->theta_bias + agent, _p.epsilon, _p.agents, ag->Q + agent, _p.num_actions, ag->seeds + agent);
float Q_a_prime = ag->Q[agent + ag->action[agent] * _p.agents];
float delta = reward - Q_a + (fail ? 0 : _p.gamma * Q_a_prime);
update_thetas(ag->theta + agent, ag->e + agent, ag->wgt + agent, _p.alpha, delta, _p.num_features, _p.agents, _p.num_actions);
if (fail) reset_trace(ag->e + agent, _p.num_features, _p.num_actions, _p.agents);
update_stored_Q(ag->Q + agent, ag->s + agent, ag->theta + agent, _p.agents,
_p.num_actions);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
}
// calculate average theta values within each agent group and duplicate
// for all agents in the group
void share_theta(AGENT_DATA *ag)
{
// loop over every agent group and accumulate the theta values and wgt's
// in agent 0 in that group, then duplicate for all agents in group
for (int i = 0; i < _p.trials; i++) {
for (int fa = 0; fa < _p.num_features * _p.num_actions; fa++) {
unsigned agent0 = i * _p.agent_group_size + fa * _p.agents;
float block_theta = 0.0f;
float block_wgt = 0.0f;
// accumulate wgtd theta and total wgt
for (int a = agent0; a < agent0 + _p.agent_group_size; a++) {
block_theta += ag->theta[a] * ag->wgt[a];
block_wgt += ag->wgt[a];
}
if (block_wgt > 0.0f){
block_theta /= block_wgt; // convert to the average theta
// block_wgt /= _p.agent_group_size; // evenly divide total wgt over all agents
// store the new theta and reset the sharing weight to its initial value
for (int a = agent0; a < agent0 + _p.agent_group_size; a++) {
ag->theta[a] = block_theta;
ag->wgt[a] = _p.initial_sharing_wgt;
}
}
}
}
}
/*
Multiply all theta bias amounts by a factor, k
*/
void reduce_theta_bias(AGENT_DATA *ag, float k)
{
for (int i = 0; i < _p.agents * _p.num_features * _p.num_actions; i++) {
ag->theta_bias[i] *= k;
}
}
// helper functions to print a timing indicator to stdout
static int _k_ = 1;
void timing_feedback_header(unsigned n)
{
_k_ = 1;
if (n > 40) {
_k_ = (1 + (n-1)/40);
}
for (int i = 0; i < (n/_k_); i++) {
printf("-");
}
printf("|\n");
}
void timing_feedback_dot(unsigned i)
{
if (0 == (i+1) % _k_) { printf("."); fflush(NULL); }
}
void run_CPU_aux(AGENT_DATA *ag, RESULTS *r)
{
// on entry the agent's theta, eligibility trace, and state values have been initialized
timing_feedback_header(_p.num_chunks);
#ifdef VERBOSE
printf("%d chunks per share\n", _p.chunks_per_share);
#endif
for (int i = 0; i < _p.num_chunks; i++) {
#ifdef VERBOSE
printf("--------------- new chunk [%d]------------------\n", i);
#endif
timing_feedback_dot(i);
if(0 == (i % _p.chunks_per_restart)){
#ifdef VERBOSE
printf("clearing traces ...\n");
#endif
clear_traces(ag);
#ifdef VERBOSE
printf("randomizing state ...\n");
#endif
randomize_all_states_biased(ag);
}
#ifdef VERBOSE
printf("learning session ...\n");
#endif
learning_session(ag);
if ((_p.agent_group_size > 1) && 0 == ((i+1)%_p.chunks_per_share)) {
#ifdef VERBOSE
printf("sharing ...\n");
#endif
share_theta(ag);
reduce_theta_bias(ag, THETA_BIAS_REDUCTION_FACTOR);
}
if (0 == ((i+1)%_p.chunks_per_test)) {
#ifdef VERBOSE
printf("testing...\n");
#endif
r->avg_fail[i/_p.chunks_per_test] = run_test(ag);
}
}
#ifdef DUMP_TERMINAL_AGENT_STATE
printf("\n----------------------------------------------\n");
dump_agents(" ENDING AGENT STATES\n", ag);
#endif
printf("\n");
if (_p.dump1) {
dump_one_agent("----------------------------------------------\n Agent 0 Ending State\n", ag);
}
}
void run_CPU(AGENT_DATA *ag, RESULTS *r)
{
#ifdef VERBOSE
printf("\n==============================================\nrunning on CPU...\n");
#endif
#ifdef DUMP_INITIAL_AGENTS
dump_agents("Initial agents on CPU", ag);
#endif
unsigned timer;
CREATE_TIMER(&timer);
START_TIMER(timer);
run_CPU_aux(ag, r);
STOP_TIMER(timer, "run on CPU");
}
void free_agentsCPU(AGENT_DATA *ag)
{
#ifdef VERBOSE
printf("freeing agents on CPU...\n");
#endif
if (ag) {
if (ag->seeds) free(ag->seeds);
if (ag->theta) free(ag->theta);
if (ag->theta_bias) free(ag->theta_bias);
if (ag->e) free(ag->e);
if (ag->wgt) free(ag->wgt);
if (ag->s) free(ag->s);
if (ag->Q) free(ag->Q);
if (ag->action) free(ag->action);
free(ag);
}
}
#pragma mark -
#pragma mark GPU
AGENT_DATA *copy_GPU_agents()
{
AGENT_DATA *agGPUcopy = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
agGPUcopy->seeds = host_copyui(d_seeds, _p.agents * 4);
agGPUcopy->theta = host_copyf(d_theta, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->theta_bias = host_copyf(d_theta_bias, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->e = host_copyf(d_e, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->wgt = host_copyf(d_wgt, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->s = host_copyf(d_s, _p.agents * _p.state_size);
agGPUcopy->Q = host_copyf(d_Q, _p.agents * _p.num_actions);
agGPUcopy->action = host_copyui(d_action, _p.agents);
return agGPUcopy;
}
// check if s1[i] and s2[i] are within a small value of each other
unsigned mismatch(float *s1, float *s2, unsigned i)
{
float small = 1.0e-4;
return s1[i] > (s2[i]+small) || s1[i] < (s2[i]-small);
}
unsigned mismatchui(unsigned *s1, unsigned *s2, unsigned i)
{
unsigned small = 0;
return s1[i] > (s2[i]+small) || s1[i] < (s2[i]-small);
}
// check that the GPU agent information copied from the device is the same as the
// CPU agent information pointed to by last_CPU_agent_dump
void check_agents(AGENT_DATA *agGPUcopy)
{
for (int agent = 0; agent < _p.agents; agent++) {
printf("[agent%4d] ", agent);
unsigned match = 1;
for (int s = 0; s < 4; s++) {
if (mismatchui(agGPUcopy->seeds, last_CPU_agent_dump->seeds, agent + s*_p.agents)){
match = 0;
printf("seed mismatch, ");
break;
}
if (mismatch(agGPUcopy->s, last_CPU_agent_dump->s, agent + s*_p.agents)){
match = 0;
printf("state mismatch, ");
break;
}
}
for (int th = 0; th < _p.num_features * _p.num_actions; th++) {
if (mismatch(agGPUcopy->theta, last_CPU_agent_dump->theta, agent + th*_p.agents)){
match = 0;
printf("theta mismatch feature=%d, action=%d, %f vs %f\n", th/_p.num_actions, th % _p.num_actions, agGPUcopy->theta[agent + th * _p.agents], last_CPU_agent_dump->theta[agent + th * _p.agents]);
// break;
}
if (mismatch(agGPUcopy->e, last_CPU_agent_dump->e, agent + th*_p.agents)){
match = 0;
printf("trace mismatch feature=%d, action=%d\n", th/_p.num_actions, th % _p.num_actions);
// break;
}
}
printf(match ? "match\n" : "\n");
}
}
void dump_agents_GPU(const char *str, unsigned check)
{
AGENT_DATA *agGPUcopy = copy_GPU_agents();
if (check) check_agents(agGPUcopy);
dump_agents(str, agGPUcopy);
free_agentsCPU(agGPUcopy);
}
void dump_one_agent_GPU(const char *str)
{
AGENT_DATA *agGPUcopy = copy_GPU_agents();
dump_one_agent(str, agGPUcopy);
free_agentsCPU(agGPUcopy);
}
/*
Initialize agent data on GPU by copying the CPU data.
Also initialize constant memory pointers to point to the GPU data.
Allocate device memory for:
dc_seeds, dc_theta, dc_e, dc_s, dc_Q, and dc_action
Device pointers also stored in host memory: d_seeds, d_theta, d_e, d_s, d_Q, and d_action,
which are used to free the device memory.
*/
void initialize_agentsGPU(AGENT_DATA *agCPU)
{
#ifdef VERBOSE
printf("initializing agents on GPU...\n");
#endif
d_seeds = device_copyui(agCPU->seeds, _p.agents * 4);
d_theta = device_copyf(agCPU->theta, _p.agents * _p.num_features * _p.num_actions);
d_theta_bias = device_copyf(agCPU->theta_bias, _p.agents * _p.num_features * _p.num_actions);
d_e = device_copyf(agCPU->e, _p.agents * _p.num_features * _p.num_actions);
d_wgt = device_copyf(agCPU->wgt, _p.agents * _p.num_features * _p.num_actions);
d_s = device_copyf(agCPU->s, _p.agents * _p.state_size);
d_Q = device_copyf(agCPU->Q, _p.agents * _p.num_actions);
d_action = device_copyui(agCPU->action, _p.agents);
hipMemcpyToSymbol("dc_seeds", &d_seeds, sizeof(unsigned *));
hipMemcpyToSymbol("dc_theta", &d_theta, sizeof(float *));
hipMemcpyToSymbol("dc_theta_bias", &d_theta_bias, sizeof(float *));
hipMemcpyToSymbol("dc_e", &d_e, sizeof(float *));
hipMemcpyToSymbol("dc_wgt", &d_wgt, sizeof(float *));
hipMemcpyToSymbol("dc_s", &d_s, sizeof(float *));
hipMemcpyToSymbol("dc_Q", &d_Q, sizeof(float *));
hipMemcpyToSymbol("dc_action", &d_action, sizeof(unsigned *));
}
// free all agent data from GPU
void free_agentsGPU()
{
#ifdef VERBOSE
printf("freeing agents on GPU...\n");
#endif
if (d_seeds) hipFree(d_seeds);
if (d_theta) hipFree(d_theta);
if (d_e) hipFree(d_e);
if (d_wgt) hipFree(d_wgt);
if (d_s) hipFree(d_s);
if (d_Q) hipFree(d_Q);
if (d_action) hipFree(d_action);
}
/*
copy state information from global device memory to shared memory
assumes stride is BLOCK_SIZE for shared memory and dc_agents for global memory
*/
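// The global arrays use a structure-of-arrays layout strided by dc_agents, so
// consecutive threads read consecutive addresses (coalesced accesses); the same
// quantities are strided by BLOCK_SIZE once they are staged in shared memory.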
#define COPY_STATE_TO_SHARED(iLocal, iGlobal) { \
s_s[iLocal] = dc_s[iGlobal]; \
s_s[iLocal + BLOCK_SIZE] = dc_s[iGlobal + dc_agents]; \
s_s[iLocal + 2*BLOCK_SIZE] = dc_s[iGlobal + 2*dc_agents]; \
s_s[iLocal + 3*BLOCK_SIZE] = dc_s[iGlobal + 3*dc_agents]; \
s_action[iLocal] = dc_action[iGlobal]; \
s_Q[iLocal] = dc_Q[iGlobal]; \
s_Q[iLocal + BLOCK_SIZE] = dc_Q[iGlobal + dc_agents]; \
}
#define COPY_STATE_TO_GLOBAL(iLocal, iGlobal) { \
dc_s[iGlobal] = s_s[iLocal]; \
dc_s[iGlobal + dc_agents] = s_s[iLocal + BLOCK_SIZE]; \
dc_s[iGlobal + 2*dc_agents] = s_s[iLocal + 2*BLOCK_SIZE]; \
dc_s[iGlobal + 3*dc_agents] = s_s[iLocal + 3*BLOCK_SIZE]; \
dc_action[iGlobal] = s_action[iLocal]; \
dc_Q[iGlobal] = s_Q[iLocal]; \
dc_Q[iGlobal + dc_agents] = s_Q[iLocal + BLOCK_SIZE]; \
}
/*
* Calculate average thetas for each feature/action value for the entire group and share with
* all threads in the group
* The group's y dimension is the feature/action index.
* Shared memory is used to do the reduction to get total values for the group.
*/
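/*
 * In effect each feature/action slot becomes the weight-averaged value
 *     theta_new = sum_i(theta_i * wgt_i) / sum_i(wgt_i)
 * over all agents in the group (when the total weight is positive), after which
 * every agent's sharing weight is reset to dc_initial_sharing_wgt.
 */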
__global__ void pole_share_kernel(unsigned numShareBlocks)
{
unsigned idx = threadIdx.x;
unsigned fa = blockIdx.y;
unsigned iGlobal = idx + blockIdx.x * dc_agent_group_size + fa * dc_agents;
// copy thetas and wgts to shared memory, converting theta to theta x wgt
extern __shared__ float s_theta[];
float *s_wgt = s_theta + blockDim.x;
s_wgt[idx] = dc_wgt[iGlobal];
s_theta[idx] = dc_theta[iGlobal] * s_wgt[idx]; // remove bias
// repeat the process if there are more than one share blocks to be reduced
for (int i = 1; i < numShareBlocks; i++) {
unsigned iG = iGlobal + i * blockDim.x;
s_wgt[idx] += dc_wgt[iG];
s_theta[idx] += dc_theta[iG] * dc_wgt[iG];
}
__syncthreads();
// do a reduction on theta for this group
for (unsigned half = blockDim.x >> 1; half > 0; half >>= 1) {
if (idx < half) {
s_theta[idx] += s_theta[idx + half];
s_wgt[idx] += s_wgt[idx + half];
}
__syncthreads();
}
// copy the values at index 0 to all threads
// **TODO** rearrange to only do all calculations when s_wgt[0] > 0.0f
float new_theta = 0.0f;
if (s_wgt[0] > 0.0f) new_theta = s_theta[0] / s_wgt[0];
for (int i = 0; i < numShareBlocks; i++) {
unsigned iG = iGlobal + i * blockDim.x;
if (s_wgt[0] > 0.0f) dc_theta[iG] = new_theta;
dc_wgt[iG] = dc_initial_sharing_wgt;
}
// **-------------
}
/*
set all eligibility trace values to 0.0f
*/
__global__ void pole_clear_trace_kernel()
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
if (iGlobal < dc_num_featuresXactionsXagents) dc_e[iGlobal] = 0.0f;
}
/*
multiply all theta bias values by a factor, k
*/
__global__ void pole_reduce_bias_kernel(float k)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
if (iGlobal < dc_num_featuresXactionsXagents) dc_theta_bias[iGlobal] *= k;
}
/*
Do a learning session for specified number of steps.
On entry, the theta values are valid from prior learning episodes.
First, randomize the state if this is a restart,
Then repeat the learning process for specified number of iterations
Ending state is saved.
Choose an action based on biased theta values
*/
__global__ void pole_learn_kernel(unsigned steps, unsigned isRestart)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
unsigned idx = threadIdx.x;
if (iGlobal >= dc_agents) return;
__shared__ float s_s[4 * BLOCK_SIZE];
__shared__ unsigned s_action[BLOCK_SIZE];
__shared__ float s_Q[2*BLOCK_SIZE];
if (isRestart) {
// randomize state, determine first action and update eligibility trace
randomize_stateGPU(s_s + idx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = choose_action_biasedGPU(s_s + idx, dc_theta + iGlobal, dc_theta_bias + iGlobal, s_Q + idx, dc_seeds + iGlobal, feature);
// s_Q contains Q values for each action from the current state
// s_action contains the chosen action to be taken from the current state
update_traceGPU(s_action[idx], s_s + idx, dc_e + iGlobal, feature);
} else COPY_STATE_TO_SHARED(idx, iGlobal);
// loop through specified number of time steps
float *s_sidx = s_s + idx;
float *s_Qidx = s_Q + idx;
for (int t = 0; t < steps; t++) {
// take the action stored in s_action
float reward = take_action(s_action[idx], s_sidx, s_sidx, BLOCK_SIZE);
unsigned fail = (reward == REWARD_FAIL);
if (fail) randomize_stateGPU(s_sidx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_sidx, BLOCK_SIZE);
// now may be in a different state
float Q_a = s_Q[idx + s_action[idx] * BLOCK_SIZE];
s_action[idx] = choose_action_biasedGPU(s_sidx, dc_theta + iGlobal, dc_theta_bias + iGlobal, s_Qidx, dc_seeds + iGlobal, feature);
float Q_a_prime = s_Q[idx + s_action[idx] * BLOCK_SIZE];
float delta = reward - Q_a + (fail ? 0 : dc_gamma * Q_a_prime);
update_thetasGPU(dc_theta + iGlobal, dc_e + iGlobal, dc_wgt + iGlobal, delta);
if (fail) reset_traceGPU(dc_e + iGlobal);
update_stored_QGPU(s_Qidx, s_sidx, dc_theta + iGlobal, feature);
update_traceGPU(s_action[idx], s_sidx, dc_e + iGlobal, feature);
}
COPY_STATE_TO_GLOBAL(idx, iGlobal);
}
__global__ void pole_test_kernel(float *results)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
unsigned idx = threadIdx.x;
if (iGlobal >= dc_agents) return;
__shared__ float s_s[4 * BLOCK_SIZE];
__shared__ unsigned s_action[BLOCK_SIZE];
__shared__ float s_Q[2*BLOCK_SIZE];
randomize_stateGPU(s_s + idx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = best_actionGPU(s_s + idx, dc_theta + iGlobal, s_Q + idx, feature);
// run the test using shared memory
float *s_sidx = s_s + idx;
float *s_Qidx = s_Q + idx;
int t = 0;
for (t = 0; t < dc_test_reps; t++) {
take_action(s_action[idx], s_sidx, s_sidx, BLOCK_SIZE);
if (terminal_state(s_sidx, BLOCK_SIZE)) {
break;
}
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = best_actionGPU(s_sidx, dc_theta + iGlobal, s_Qidx, feature);
}
results[iGlobal] = t;
}
void run_GPU(RESULTS *r)
{
#ifdef VERBOSE
printf("\n==============================================\nRunning on GPU...\n");
#endif
// on entry the device constant pointers have been initialized to agent's theta,
// eligibility trace, and state values
#ifdef DUMP_INITIAL_AGENTS
dump_agents_GPU("initial agents on GPU", 0);
#endif
// setup constant memory on device
set_constant_params(_p);
// allocate an array to hold individual thread test results
float *d_results = device_allocf(_p.agents * _p.num_tests);
// one thread for each agent in each trial
dim3 blockDim(BLOCK_SIZE);
dim3 gridDim(1 + (_p.agents - 1) / BLOCK_SIZE);
if (gridDim.x > 65535){
gridDim.y = 1 + (gridDim.x-1) / 65535;
gridDim.x = 1 + (gridDim.x-1) / gridDim.y;
}
dim3 clearTraceBlockDim(512);
dim3 clearTraceGridDim(1 + (_p.agents * _p.num_features * _p.num_actions - 1) / 512);
if (clearTraceGridDim.x > 65535) {
clearTraceGridDim.y = 1 + (clearTraceGridDim.x-1) / 65535;
clearTraceGridDim.x = 1 + (clearTraceGridDim.x-1) / clearTraceGridDim.y;
}
// calculate a multiplier in case the agent group size is more than 512
unsigned numShareBlocks = 1;
unsigned shareBlockSize = _p.agent_group_size;
if (shareBlockSize > 512) {
numShareBlocks = shareBlockSize / 512;
shareBlockSize = 512;
}
dim3 shareBlockDim(shareBlockSize);
dim3 shareGridDim(_p.trials, _p.num_features * _p.num_actions);
#ifdef VERBOSE
printf("%d total agents\n", _p.agents);
printf("%d threads per block, (%d x %d) grid of blocks\n", blockDim.x, gridDim.x, gridDim.y);
printf("for sharing: %d threads per block, (%d x %d) grid of blocks\n", shareBlockDim.x, shareGridDim.x, shareGridDim.y);
printf("for clearing trace: %d threads per block, (%d x %d) grid of blocks\n",
clearTraceBlockDim.x, clearTraceGridDim.x, clearTraceGridDim.y);
#endif
float timeClear = 0.0f;
float timeLearn = 0.0f;
float timeShare = 0.0f;
float timeTest = 0.0f;
float timeReduce = 0.0f;
unsigned timerCPU;
CREATE_TIMER(&timerCPU);
START_TIMER(timerCPU);
CUDA_EVENT_PREPARE;
#ifdef VERBOSE
printf("chunk interval is %d and there are %d chunks in the total time steps of %d\n",
_p.chunk_interval, _p.num_chunks, _p.time_steps);
printf(" restart interval is %d which is %d chunks\n", _p.restart_interval, _p.chunks_per_restart);
printf(" sharing interval is %d which is %d chunks\n", _p.sharing_interval, _p.chunks_per_share);
printf(" testing interval is %d which is %d chunks\n", _p.test_interval, _p.chunks_per_test);
#endif
timing_feedback_header(_p.num_chunks);
for (int i = 0; i < _p.num_chunks; i++) {
timing_feedback_dot(i);
#ifdef VERBOSE
printf("--------------- new chunk [%d]------------------\n", i);
#endif
unsigned isRestart = (0 == (i % _p.chunks_per_restart));
if(isRestart){
// reset traces
CUDA_EVENT_START
hipLaunchKernelGGL(( pole_clear_trace_kernel), dim3(clearTraceGridDim), dim3(clearTraceBlockDim), 0, 0, );
CUDA_EVENT_STOP(timeClear);
CUT_CHECK_ERROR("pole_clear_trace_kernel execution failed");
}
// always do learning for this chunk of time
CUDA_EVENT_START
hipLaunchKernelGGL(( pole_learn_kernel), dim3(gridDim), dim3(blockDim), 0, 0, _p.chunk_interval, isRestart);
CUDA_EVENT_STOP(timeLearn);
CUT_CHECK_ERROR("pole_learn_kernel execution failed");
if ((_p.agent_group_size > 1) && (0 == ((i+1) % _p.chunks_per_share))) {
CUDA_EVENT_START;
hipLaunchKernelGGL(( pole_share_kernel), dim3(shareGridDim), dim3(shareBlockDim), 2*shareBlockDim.x * sizeof(float), 0, numShareBlocks);
CUDA_EVENT_STOP(timeShare);
CUT_CHECK_ERROR("pole_share_kernel execution failed");
CUDA_EVENT_START;
hipLaunchKernelGGL(( pole_reduce_bias_kernel), dim3(clearTraceGridDim), dim3(clearTraceBlockDim), 0, 0, THETA_BIAS_REDUCTION_FACTOR);
CUDA_EVENT_STOP(timeClear);
CUT_CHECK_ERROR("pole_reduce_bias_kernel execution failed");
}
if (0 == ((i+1) % _p.chunks_per_test)) {
CUDA_EVENT_START;
hipLaunchKernelGGL(( pole_test_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_results + (i / _p.chunks_per_test) * _p.agents);
CUDA_EVENT_STOP(timeTest);
CUT_CHECK_ERROR("pole_test_kernel execution failed");
}
}
printf("\n");
// reduce the result array on the device and copy back to the host
CUDA_EVENT_START;
row_reduce(d_results, _p.agents, _p.num_tests);
for (int i = 0; i < _p.num_tests; i++) {
CUDA_SAFE_CALL(hipMemcpy(r->avg_fail + i, d_results + i * _p.agents, sizeof(float),
hipMemcpyDeviceToHost));
r->avg_fail[i] /= _p.agents;
}
CUDA_EVENT_STOP(timeReduce);
CUDA_EVENT_CLEANUP;
STOP_TIMER(timerCPU, "total GPU time");
PRINT_TIME(timeClear, "pole_clear_trace_kernel");
PRINT_TIME(timeLearn, "pole_learn_kernel");
PRINT_TIME(timeShare, "pole_share_kernel");
PRINT_TIME(timeTest, "pole_test_kernel");
PRINT_TIME(timeReduce, "pole_reduce_kernel");
#ifdef DUMP_TERMINAL_AGENT_STATE
dump_agents_GPU("--------------------------------------\n Ending Agent States\n", 0);
#endif
if (_p.dump1) {
dump_one_agent_GPU("----------------------------------------------\n Agent 0 Ending State\n");
}
if (d_results) hipFree(d_results);
}
| ceb6a8700d78c2460b4b2213373ce27be4c924b6.cu | //
// pole.cu
// pole
//
// Created by Dwight Bell on 8/18/10.
// Copyright dbelll 2010. All rights reserved.
//
#include <cuda.h>
#include "cutil.h"
#include "cuda_rand.cu"
#include "pole.h"
#include "cuda_utils.h"
#include "main.h"
#include "cuda_row_reduction.h"
// parameters are stored in constant memory on the device
__constant__ unsigned dc_agents;
__constant__ unsigned dc_agent_group_size;
__constant__ unsigned dc_time_steps;
__constant__ float dc_initial_sharing_wgt;
__constant__ float dc_epsilon;
__constant__ float dc_gamma;
__constant__ float dc_lambda;
__constant__ float dc_gammaXlambda;
__constant__ float dc_alpha;
__constant__ unsigned dc_num_actions;
__constant__ unsigned dc_num_actionsXagents;
__constant__ unsigned dc_num_features;
__constant__ unsigned dc_num_featuresXactionsXagents;
__constant__ unsigned dc_test_interval;
__constant__ unsigned dc_test_reps;
// fixed pointers are stored in constant memory on the device
__constant__ unsigned *dc_seeds;
__constant__ float *dc_theta;
__constant__ float *dc_theta_bias;
__constant__ float *dc_e;
__constant__ float *dc_wgt;
__constant__ float *dc_s;
__constant__ float *dc_Q;
__constant__ unsigned *dc_action;
static AGENT_DATA *last_CPU_agent_dump;
// device pointers are stored here so they can be freed prior to exit
static unsigned *d_seeds;
static float *d_theta;
static float *d_theta_bias;
static float *d_e;
static float *d_wgt;
static float *d_s;
static float *d_Q;
static unsigned *d_action;
// copy parameter values to constant memory on device
void set_constant_params(PARAMS p)
{
cudaMemcpyToSymbol("dc_agents", &p.agents, sizeof(unsigned));
cudaMemcpyToSymbol("dc_agent_group_size", &p.agent_group_size, sizeof(unsigned));
cudaMemcpyToSymbol("dc_time_steps", &p.time_steps, sizeof(unsigned));
cudaMemcpyToSymbol("dc_initial_sharing_wgt", &p.initial_sharing_wgt, sizeof(float));
cudaMemcpyToSymbol("dc_epsilon", &p.epsilon, sizeof(float));
cudaMemcpyToSymbol("dc_gamma", &p.gamma, sizeof(float));
cudaMemcpyToSymbol("dc_lambda", &p.lambda, sizeof(float));
float gammaXlambda = p.gamma * p.lambda;
cudaMemcpyToSymbol("dc_gammaXlambda", &gammaXlambda, sizeof(float));
cudaMemcpyToSymbol("dc_alpha", &p.alpha, sizeof(float));
cudaMemcpyToSymbol("dc_num_actions", &p.num_actions, sizeof(unsigned));
unsigned num_actionsXagents = p.num_actions * p.agents;
cudaMemcpyToSymbol("dc_num_actionsXagents", &num_actionsXagents, sizeof(unsigned));
cudaMemcpyToSymbol("dc_num_features", &p.num_features, sizeof(unsigned));
unsigned num_featuresXactionsXagents = p.num_features * p.num_actions * p.agents;
cudaMemcpyToSymbol("dc_num_featuresXactionsXagents", &num_featuresXactionsXagents, sizeof(unsigned));
cudaMemcpyToSymbol("dc_test_interval", &p.test_interval, sizeof(unsigned));
cudaMemcpyToSymbol("dc_test_reps", &p.test_reps, sizeof(unsigned));
}
/*
Procedures for setting up and running the pole balancing experiments on CPU and GPU
*/
static PARAMS _p;
static unsigned g_seeds[4] = {2784565659u, 1491908209u, 3415062841u, 3293636241u};
#pragma mark CPU & GPU
// random number in an interval from -max to +max using random uniform distribution
__host__ __device__ float random_interval(unsigned *seeds, unsigned stride, float max)
{
float r = (-max) + 2 * max * RandUniform(seeds, stride);
return r;
}
// randomize the state
__host__ void randomize_state(float *s, unsigned *seeds, unsigned stride)
{
s[0] = random_interval(seeds, stride, ANGLE_MAX);
s[stride] = random_interval(seeds, stride, ANGLE_VEL_MAX/4.0f);
s[2*stride] = random_interval(seeds, stride, X_MAX);
s[3*stride] = random_interval(seeds, stride, X_VEL_MAX/4.0f);
}
__device__ void randomize_stateGPU(float *s, unsigned *seeds)
{
s[0] = random_interval(seeds, dc_agents, ANGLE_MAX);
s[BLOCK_SIZE] = random_interval(seeds, dc_agents, ANGLE_VEL_MAX/4.0f);
s[2*BLOCK_SIZE] = random_interval(seeds, dc_agents, X_MAX);
s[3*BLOCK_SIZE] = random_interval(seeds, dc_agents, X_VEL_MAX/4.0f);
}
// reset eligibility traces to 0.0f
__host__ void reset_trace(float *e, unsigned num_features, unsigned num_actions,
unsigned stride)
{
for (int f = 0; f < num_features; f++) {
for (int a = 0; a < num_actions; a++) {
e[(a + f * num_actions) * stride] = 0.0f;
}
}
}
__device__ void reset_traceGPU(float *e)
{
for (int f = 0; f < dc_num_featuresXactionsXagents; f += dc_num_actionsXagents) {
for (int a = 0; a < dc_num_actionsXagents; a += dc_agents) {
e[a + f] = 0.0f;
}
}
}
__device__ __host__ unsigned terminal_state(float *s, unsigned stride)
{
float s2 = s[2*stride];
return (s2 < X_MIN) || (s2 > X_MAX) || (s[0] < ANGLE_MIN) || (s[0] > ANGLE_MAX);
}
// take an action from the current state, s, returning the reward and saving the new state in s_prime
__device__ __host__ float take_action(unsigned a, float *s, float *s_prime, unsigned stride)
{
// formulas are from: Brownlee. The pole balancing problem: a benchmark control theory
// problem.hdl.handle.net (2005)
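// state layout (entries are `stride` apart): s[0] = pole angle, s[stride] = angular
// velocity, s[2*stride] = cart position, s[3*stride] = cart velocity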
// determine force from the action
float F = a ? FORCE : -FORCE;
float ang = s[0];
float ang_vel = s[stride];
float cos_a = cos(ang);
float sin_a = sin(ang);
// calculate angular acceleration
float ang_accel = GRAV * sin_a;
ang_accel += cos_a * (-F - POLE_MASS * POLE_LENGTH * ang_vel * ang_vel * sin_a) /
(CART_MASS + POLE_MASS);
ang_accel /= POLE_LENGTH * (4.0f/3.0f - POLE_MASS * cos_a * cos_a / (CART_MASS + POLE_MASS));
float x = s[2*stride];
float x_vel = s[3*stride];
// calculate x acceleration
float x_accel = F + POLE_MASS * POLE_LENGTH * (ang_vel * ang_vel * sin_a - ang_accel * cos_a);
x_accel /= (CART_MASS + POLE_MASS);
// update ang, ang_vel and x, x_vel
s_prime[0] = ang + TAU * ang_vel;
s_prime[stride] = ang_vel + TAU * ang_accel;
s_prime[2*stride] = x + TAU * x_vel;
s_prime[3*stride] = x_vel + TAU * x_accel;
// determine the reward
float reward = terminal_state(s_prime, stride) ? REWARD_FAIL : REWARD_NON_FAIL;
return reward;
}
// Calculate which feature division the state value falls into, based on the min, max,
// and number of divisions.
__device__ __host__ unsigned feature_val_for_state_val(float s, float minv, float maxv,
unsigned div)
{
return (unsigned)max(0.0f, min(((float)(div)-1.0f), ((s-minv)/(maxv-minv) * (float)div)));
}
// Determine which feature corresponds to the given state
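// Each of the four state variables is discretized independently and the results are
// combined into one tile index:
// feature = a + ANGLE_DIV * (av + ANGLE_VEL_DIV * (x + X_DIV * xv)),
// i.e. a mixed-radix number with one digit per state variable.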
__device__ __host__ unsigned feature_for_state(float *s, unsigned stride)
{
unsigned feature = feature_val_for_state_val(s[0], ANGLE_MIN, ANGLE_MAX, ANGLE_DIV);
feature += (ANGLE_DIV) *
feature_val_for_state_val(s[stride], ANGLE_VEL_MIN, ANGLE_VEL_MAX, ANGLE_VEL_DIV);
feature += (ANGLE_DIV * ANGLE_VEL_DIV) *
feature_val_for_state_val(s[2 * stride], X_MIN, X_MAX, X_DIV);
feature += (ANGLE_DIV * ANGLE_VEL_DIV * X_DIV) *
feature_val_for_state_val(s[3 * stride], X_VEL_MIN, X_VEL_MAX, X_VEL_DIV);
return feature;
}
// Calculate a number with the division for each state variable
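// Packs each division index into its own hex digit (factors 16, 256, 4096); assuming each
// *_DIV is at most 16, the %4x fields in the dump routines then show one digit per state
// variable.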
__device__ __host__ unsigned divs_for_feature(unsigned feature)
{
unsigned divs = feature % ANGLE_DIV;
feature /= ANGLE_DIV;
divs += 16 * (feature % ANGLE_VEL_DIV);
feature /= ANGLE_VEL_DIV;
divs += 256 * (feature % X_DIV);
feature /= X_DIV;
divs += 4096 * feature;
return divs;
}
// lookup the Q value for an action from a state
// can also be used to lookup theta_bias values
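// theta is indexed as theta[(action + feature * num_actions) * stride]; callers pass a
// pointer already offset to their agent, and stride is the agent count, so values for the
// same (feature, action) are contiguous across agents (coalesced on the GPU).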
__host__ float calc_Q(float *s, unsigned a, float *theta, unsigned stride, unsigned num_actions)
{
// only one feature corresponds with any given state
unsigned feature = feature_for_state(s, stride);
float Q = theta[(a + feature * num_actions) * stride];
return Q;
}
__device__ float calc_QGPU(float *s, unsigned a, float *theta, unsigned feature)
{
// only one feature corresponds with any given state
float Q = theta[(a + feature * NUM_ACTIONS) * dc_agents];
return Q;
}
__host__ void update_stored_Q(float *Q, float *s, float *theta, unsigned stride, unsigned num_actions)
{
for (int a = 0; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
}
}
__device__ void update_stored_QGPU(float *Q, float *s, float *theta, unsigned feature)
{
for (int a = 0; a < NUM_ACTIONS; a++) {
Q[a * BLOCK_SIZE] = calc_QGPU(s, a, theta, feature);
}
}
// Calculate the Q value for each action from the given state, storing the values in Q
// Return the action with the highest Q value
__host__ unsigned best_action(float *s, float *theta, float *Q, unsigned stride, unsigned num_actions)
{
// calculate the Q value for each action
Q[0] = calc_Q(s, 0, theta, stride, num_actions);
unsigned best_action = 0;
float bestQ = Q[0];
for (int a = 1; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
if (Q[a * stride] > bestQ) {
bestQ = Q[a * stride];
best_action = a;
}
}
return best_action;
}
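// The *_biased variants add the per-agent theta_bias term to each Q value only when picking
// the argmax; the stored Q values and the TD update stay unbiased, and the bias is scaled by
// THETA_BIAS_REDUCTION_FACTOR after each sharing step.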
__host__ unsigned best_action_biased(float *s, float *theta, float *theta_bias, float *Q, unsigned stride, unsigned num_actions)
{
// calculate the Q value for each action
Q[0] = calc_Q(s, 0, theta, stride, num_actions);
float bias = calc_Q(s, 0, theta_bias, stride, num_actions);
unsigned best_action = 0;
float bestQ_biased = Q[0] + bias;
#ifdef LOG_BIAS
printf("Q[0]=%7.4f, bias = %7.4f ...", Q[0], bias);
#endif
for (int a = 1; a < num_actions; a++) {
Q[a * stride] = calc_Q(s, a, theta, stride, num_actions);
bias = calc_Q(s, a, theta_bias, stride, num_actions);
#ifdef LOG_BIAS
printf("Q[%d]=%7.4f, bias = %7.4f ...", a, Q[a*stride], bias);
#endif
if ((Q[a * stride]+bias) > bestQ_biased) {
bestQ_biased = (Q[a * stride]+bias);
best_action = a;
#ifdef LOG_BIAS
if (Q[0] > Q[a*stride]) {
printf("<----------- bias applied !!!!");
}
#endif
}
#ifdef LOG_BIAS
else {
if (Q[0] < Q[a*stride]) {
printf("<----------- bias applied !!!!");
}
}
#endif
}
#ifdef LOG_BIAS
printf("best action is %d\n", best_action);
#endif
return best_action;
}
__device__ unsigned best_actionGPU(float *s, float *theta, float *Q, unsigned feature)
{
// calculate the Q value for each action
Q[0] = calc_QGPU(s, 0, theta, feature);
unsigned best_action = 0;
float bestQ = Q[0];
unsigned index = BLOCK_SIZE;
for (int a = 1; a < NUM_ACTIONS; a++, index += BLOCK_SIZE) {
Q[index] = calc_QGPU(s, a, theta, feature);
if (Q[index] > bestQ) {
bestQ = Q[index];
best_action = a;
}
}
return best_action;
}
__device__ unsigned best_action_biasedGPU(float *s, float *theta, float *theta_bias, float *Q, unsigned feature)
{
// calculate the Q value for each action
Q[0] = calc_QGPU(s, 0, theta, feature);
float bias = calc_QGPU(s, 0, theta_bias, feature);
unsigned best_action = 0;
float bestQ_biased = Q[0] + bias;
unsigned index = BLOCK_SIZE;
for (int a = 1; a < NUM_ACTIONS; a++, index += BLOCK_SIZE) {
Q[index] = calc_QGPU(s, a, theta, feature);
bias = calc_QGPU(s, a, theta_bias, feature);
if ((Q[index]+bias) > bestQ_biased) {
bestQ_biased = (Q[index]+bias);
best_action = a;
}
}
return best_action;
}
// choose action from current state, storing Q values for each possible action in Q
__host__ unsigned choose_action(float *s, float *theta, float epsilon, unsigned stride,
float *Q, unsigned num_actions, unsigned *seeds)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action(s, theta, Q, stride, num_actions);
if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){
// choose random action
float r = RandUniform(seeds, stride);
a = r * num_actions;
}
return a;
}
// choose action from current state, storing Q values for each possible action in Q
__host__ unsigned choose_action_biased(float *s, float *theta, float *theta_bias, float epsilon, unsigned stride, float *Q, unsigned num_actions, unsigned *seeds)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action_biased(s, theta, theta_bias, Q, stride, num_actions);
if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){
// choose random action
float r = RandUniform(seeds, stride);
a = r * num_actions;
}
return a;
}
__device__ unsigned choose_actionGPU(float *s, float *theta, float *Q, unsigned *seeds, unsigned feature)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_actionGPU(s, theta, Q, feature);
if (dc_epsilon > 0.0f && RandUniform(seeds, dc_agents) < dc_epsilon){
// choose random action
float r = RandUniform(seeds, dc_agents);
a = r * NUM_ACTIONS;
}
return a;
}
__device__ unsigned choose_action_biasedGPU(float *s, float *theta, float *theta_bias, float *Q, unsigned *seeds, unsigned feature)
{
// always calculate the best action and store all the Q values for each action
unsigned a = best_action_biasedGPU(s, theta, theta_bias, Q, feature);
if (dc_epsilon > 0.0f && RandUniform(seeds, dc_agents) < dc_epsilon){
// choose random action
float r = RandUniform(seeds, dc_agents);
a = r * NUM_ACTIONS;
}
return a;
}
// Update eligibility traces based on action and state
__host__ void update_trace(unsigned action, float *s, float *e, unsigned num_features,
unsigned num_actions, unsigned stride)
{
unsigned feature = feature_for_state(s, stride);
float gl = _p.gamma * _p.lambda;
for (int f = 0; f < num_features; f++) {
for (int a = 0; a < num_actions; a++) {
unsigned index = (a + f * num_actions) * stride;
// replacing traces: entries for the current feature are set explicitly, all others decay
if (f == feature) {
// set to 1.0 for action selected from current state,
// set to 0.0 for actions not taken from current state
e[index] = (a == action) ? 1.0f : 0.0f;
}else {
// decay all other values
e[index] *= gl;
}
}
}
}
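// GPU version of update_trace: loop bounds and offsets are pre-multiplied by dc_agents
// (dc_num_actionsXagents, dc_num_featuresXactionsXagents), so the index arithmetic walks
// the agent-strided layout directly.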
__device__ void update_traceGPU(unsigned action, float *s, float *e, unsigned feature)
{
unsigned ff = feature * dc_num_actionsXagents;
unsigned aa = action * dc_agents;
for (unsigned f = 0; f < dc_num_featuresXactionsXagents; f += dc_num_actionsXagents) {
for (unsigned a = 0; a < dc_num_actionsXagents; a += dc_agents) {
unsigned index = a + f;
// replacing traces: entries for the current feature are set explicitly, all others decay
if (f == ff) {
// set to 1.0 for action selected from current state,
// set to 0.0 for actions not taken from current state
e[index] = (a == aa) ? 1.0f : 0.0f;
}else{
// decay all other values
e[index] *= dc_gammaXlambda;
}
}
}
}
// Update theta values for one agent
// theta = theta + alpha * delta * eligibility trace
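// wgt accumulates alpha * e alongside the theta update; share_theta later uses it as the
// weight of this agent's theta in the group average before resetting it to
// initial_sharing_wgt.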
__host__ void update_thetas(float *theta, float *e, float *wgt, float alpha, float delta, unsigned num_features, unsigned stride, unsigned num_actions)
{
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf("updating thetas for alpha = %9.6f, delta = %9.6f\n", alpha, delta);
#endif
for (int fa = 0; fa < num_features * num_actions * stride; fa += stride) {
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf(" feature-action %5d(%4x) %3d with trace %9.6f changed from %9.6f", (fa/num_actions), divs_for_feature(fa/num_actions), (fa%num_actions), e[fa*stride], theta[fa*stride]);
#endif
theta[fa] += alpha * delta * e[fa];
wgt[fa] += alpha * e[fa];
#ifdef DUMP_THETA_UPDATE_CALCULATIONS
printf(" to %9.6f\n", theta[fa*stride]);
#endif
}
}
__device__ void update_thetasGPU(float *theta, float *e, float *wgt, float delta)
{
float ad = dc_alpha * delta;
for (int fa = 0; fa < dc_num_featuresXactionsXagents; fa += dc_agents) {
theta[fa] += ad * e[fa];
wgt[fa] += dc_alpha * e[fa];
}
}
#pragma mark -
#pragma mark CPU
void set_params(PARAMS p){ _p = p;}
void dump_agent(AGENT_DATA *ag, unsigned agent)
{
printf("[agent %d]: ", agent);
printf(" seeds = %u, %u, %u, %u\n", ag->seeds[agent], ag->seeds[agent + _p.agents],
ag->seeds[agent + 2*_p.agents], ag->seeds[agent + 3*_p.agents]);
#ifdef AGENT_DUMP_INCLUDE_THETA_E
printf("FEATURE ACTION THETA E WGT BIAS\n");
for (int f = 0; f < _p.num_features; f++) {
for (int action = 0; action < _p.num_actions; action++) {
printf("%7d %4x %7d %9.4f %9.4f %9.2f %9.4f\n", f, divs_for_feature(f), action,
ag->theta[agent + (action + f * _p.num_actions) * _p.agents],
ag->e[agent + (action + f * _p.num_actions) * _p.agents],
ag->wgt[agent + (action + f * _p.num_actions) * _p.agents],
ag->theta_bias[agent + (action + f * _p.num_actions) * _p.agents]);
}
}
#endif
printf(" angle angleV x xV Q0 Q1 feature\n");
unsigned feature = feature_for_state(ag->s + agent, _p.agents);
printf("%9.6f %9.6f %9.6f %9.6f %9.6f %9.6f %7d(%4x)\n", ag->s[agent], ag->s[agent + _p.agents], ag->s[agent + 2*_p.agents], ag->s[agent + 3*_p.agents], ag->Q[agent], ag->Q[agent + _p.agents],
feature, divs_for_feature(feature));
printf("ACTION Q-value\n");
for (int action = 0; action < _p.num_actions; action++) {
(action == ag->action[agent]) ? printf("-->") : printf(" ");
printf("%3d %9.6f\n", action, ag->Q[agent + action * _p.agents]);
}
printf("\n");
}
void dump_agents(const char *str, AGENT_DATA *ag)
{
last_CPU_agent_dump = ag;
printf("%s\n", str);
for (int agent = 0; agent < _p.agents; agent++) {
dump_agent(ag, agent);
}
}
void dump_one_agent(const char *str, AGENT_DATA *ag)
{
printf("%s\n", str);
dump_agent(ag, 0);
}
// generate random seeds for the specified number of agents
unsigned *create_seeds(unsigned num_agents)
{
unsigned *seeds = (unsigned *)malloc(num_agents * 4 * sizeof(unsigned));
for (int i = 0; i < num_agents * 4; i++) {
seeds[i] = RandUniformui(g_seeds, 1);
}
return seeds;
}
// create thetas set initially to random values between theta_min and theta_max
float *create_theta(unsigned num_agents, unsigned num_features, unsigned num_actions, float theta_min, float theta_max)
{
#ifdef VERBOSE
printf("create_theta for %d agents and %d features\n", num_agents, num_features);
#endif
float *theta = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
theta[i] = (theta_max - theta_min) * RandUniform(g_seeds, 1) + theta_min;
}
return theta;
}
// create theta_bias amounts set initially to random values between -THETA_BIAS_MAX and +THETA_BIAS_MAX
float *create_theta_bias(unsigned num_agents, unsigned num_features, unsigned num_actions, float theta_bias_max)
{
#ifdef VERBOSE
printf("create_theta_bias for %d agents and %d features\n", num_agents, num_features);
#endif
float *bias = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int a = 0; a < num_agents; a++) {
for (int fa = 0; fa < num_features * num_actions; fa++) {
if (theta_bias_max > 0.0f) {
bias[fa * num_agents + a] = random_interval(g_seeds, 1, theta_bias_max);
}else {
bias[fa * num_agents + a] = 0.0f;
}
}
}
return bias;
}
// initialize eligibility traces to 0.0f
float *create_e(unsigned num_agents, unsigned num_features, unsigned num_actions)
{
#ifdef VERBOSE
printf("create_e for %d agents and %d features and %d actions\n", num_agents, num_features, num_actions);
#endif
float *e = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
e[i] = 0.0f;
}
return e;
}
// initialize wgt's to initial_sharing_wgt
float *create_wgt(unsigned num_agents, unsigned num_features, unsigned num_actions, float initial_sharing_wgt)
{
#ifdef VERBOSE
printf("create_wgt for %d agents and %d features and %d actions\n", num_agents, num_features, num_actions);
#endif
float *wgt = (float *)malloc(num_agents * num_features * num_actions * sizeof(float));
for (int i = 0; i < num_agents * num_features * num_actions; i++) {
wgt[i] = initial_sharing_wgt;
}
return wgt;
}
// initial random states
float *create_states(unsigned num_agents, unsigned *seeds)
{
float *states = (float *)malloc(num_agents * _p.state_size * sizeof(float));
for (int i = 0; i < num_agents; i++) {
randomize_state(states + i, seeds + i, num_agents);
}
return states;
}
RESULTS *initialize_results()
{
#ifdef VERBOSE
printf("initializing result arrays...\n");
#endif
RESULTS *r = (RESULTS *)malloc(sizeof(RESULTS));
r->avg_fail = (float *)malloc((_p.time_steps / _p.test_interval) * sizeof(float));
return r;
}
void free_results(RESULTS *r)
{
#ifdef VERBOSE
printf("freeing result arrays...\n");
#endif
if (r) {
if (r->avg_fail) free(r->avg_fail);
free(r);
}
}
void display_results(const char *str, RESULTS *r)
{
printf("%s \n", str);
printf(" TEST Avg Episode\n");
for (int i = 0; i < _p.num_tests; i++) {
printf(" [%4d]%9.0f\n", i, r->avg_fail[i]);
}
}
unsigned *create_actions(unsigned num_agents, unsigned num_actions)
{
unsigned *actions = (unsigned *)malloc(num_agents * num_actions * sizeof(unsigned));
for (int i = 0; i < num_agents * num_actions; i++) {
actions[i] = num_actions; // not possible action
}
return actions;
}
// Initialize agents on the CPU. Some values will be re-used for GPU agents
AGENT_DATA *initialize_agentsCPU()
{
#ifdef VERBOSE
printf("initializing agents on CPU...\n");
#endif
AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
ag->seeds = create_seeds(_p.agents);
ag->theta = create_theta(_p.agents, _p.num_features, _p.num_actions, _p.initial_theta_min, _p.initial_theta_max);
ag->theta_bias = create_theta_bias(_p.agents, _p.num_features, _p.num_actions, _p.theta_bias_max);
ag->e = create_e(_p.agents, _p.num_features, _p.num_actions);
ag->wgt = create_wgt(_p.agents, _p.num_features, _p.num_actions, _p.initial_sharing_wgt);
ag->s = create_states(_p.agents, ag->seeds);
ag->Q = (float *)malloc(_p.agents * _p.num_actions * sizeof(float));
ag->action = create_actions(_p.agents, _p.num_actions);
return ag;
}
void dump_state(float *s, unsigned stride)
{
printf("(%9.6f,%9.6f,%9.6f,%9.6f)[%d]\n", s[0], s[stride], s[2*stride], s[3*stride],
feature_for_state(s, stride));
}
// run tests for all agents and return the average failures
float run_test(AGENT_DATA *ag)
{
float total_time = 0.0f;
// initialize all agent states
for (int agent = 0; agent < _p.agents; agent++) {
// save agent state prior to testing
float s0 = ag->s[agent];
float s1 = ag->s[agent + _p.agents];
float s2 = ag->s[agent + 2*_p.agents];
float s3 = ag->s[agent + 3*_p.agents];
unsigned act = ag->action[agent];
float Q0 = ag->Q[agent];
float Q1 = ag->Q[agent + _p.agents];
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = best_action(ag->s + agent, ag->theta + agent, ag->Q + agent, _p.agents, _p.num_actions);
// run the test for up to the specified number of reps or first failure
int t;
for (t = 0; t < _p.test_reps; t++) {
take_action(ag->action[agent], ag->s+agent, ag->s+agent, _p.agents);
if (terminal_state(ag->s + agent, _p.agents)){
break;
}
// choose best action
ag->action[agent] = best_action(ag->s + agent, ag->theta + agent, ag->Q + agent, _p.agents, _p.num_actions);
}
total_time += t;
// restore agent state
ag->s[agent] = s0;
ag->s[agent + _p.agents] = s1;
ag->s[agent + 2*_p.agents] = s2;
ag->s[agent + 3*_p.agents] = s3;
ag->action[agent] = act;
ag->Q[agent] = Q0;
ag->Q[agent + _p.agents] = Q1;
}
return total_time / (float)_p.agents;
}
void clear_traces(AGENT_DATA *ag)
{
for (int i = 0; i < _p.agents * _p.num_features * _p.num_actions; i++) {
ag->e[i] = 0.0f;
}
}
void randomize_all_states(AGENT_DATA *ag)
{
// randomize the state for all agents, preparing for a new test session
for (int agent = 0; agent < _p.agents; agent++) {
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = choose_action(ag->s + agent, ag->theta + agent, _p.epsilon, _p.agents,
ag->Q + agent, _p.num_actions, ag->seeds + agent);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
void randomize_all_states_biased(AGENT_DATA *ag)
{
// randomize the state for all agents, preparing for a new test session
for (int agent = 0; agent < _p.agents; agent++) {
randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
ag->action[agent] = choose_action_biased(ag->s + agent, ag->theta + agent, ag->theta_bias + agent, _p.epsilon, _p.agents, ag->Q + agent, _p.num_actions, ag->seeds + agent);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
void learning_session(AGENT_DATA *ag)
{
// run learning session for all agents for one chunk of time
for (int agent = 0; agent < _p.agents; agent++) {
// loop over the time steps in the chunk
for (int t = 0; t < _p.chunk_interval; t++) {
float reward = take_action(ag->action[agent], ag->s + agent, ag->s + agent, _p.agents);
unsigned fail = terminal_state(ag->s + agent, _p.agents);
if (fail) randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
float Q_a = ag->Q[agent + ag->action[agent] * _p.agents];
ag->action[agent] = choose_action_biased(ag->s + agent, ag->theta + agent, ag->theta_bias + agent, _p.epsilon, _p.agents, ag->Q + agent, _p.num_actions, ag->seeds + agent);
float Q_a_prime = ag->Q[agent + ag->action[agent] * _p.agents];
float delta = reward - Q_a + (fail ? 0 : _p.gamma * Q_a_prime);
update_thetas(ag->theta + agent, ag->e + agent, ag->wgt + agent, _p.alpha, delta, _p.num_features, _p.agents, _p.num_actions);
if (fail) reset_trace(ag->e + agent, _p.num_features, _p.num_actions, _p.agents);
update_stored_Q(ag->Q + agent, ag->s + agent, ag->theta + agent, _p.agents,
_p.num_actions);
update_trace(ag->action[agent], ag->s + agent, ag->e + agent, _p.num_features,
_p.num_actions, _p.agents);
}
}
}
// calculate average theta values within each agent group and duplicate
// for all agents in the group
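// For each (feature, action): new_theta = sum_a(theta_a * wgt_a) / sum_a(wgt_a) over the
// group; every agent in the group then receives new_theta and has its sharing weight reset
// to initial_sharing_wgt.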
void share_theta(AGENT_DATA *ag)
{
// loop over every agent group and accumulate the theta values and wgt's
// in agent 0 in that group, then duplicate for all agents in group
for (int i = 0; i < _p.trials; i++) {
for (int fa = 0; fa < _p.num_features * _p.num_actions; fa++) {
unsigned agent0 = i * _p.agent_group_size + fa * _p.agents;
float block_theta = 0.0f;
float block_wgt = 0.0f;
// accumulate wgtd theta and total wgt
for (int a = agent0; a < agent0 + _p.agent_group_size; a++) {
block_theta += ag->theta[a] * ag->wgt[a];
block_wgt += ag->wgt[a];
}
if (block_wgt > 0.0f){
block_theta /= block_wgt; // convert to the average theta
// block_wgt /= _p.agent_group_size; // evenly divide total wgt over all agents
// store the new average theta and reset the sharing weight to its initial value
for (int a = agent0; a < agent0 + _p.agent_group_size; a++) {
ag->theta[a] = block_theta; // the weighted group average
ag->wgt[a] = _p.initial_sharing_wgt;
}
}
}
}
}
/*
Multiply all theta bias amounts by a factor, k
*/
void reduce_theta_bias(AGENT_DATA *ag, float k)
{
for (int i = 0; i < _p.agents * _p.num_features * _p.num_actions; i++) {
ag->theta_bias[i] *= k;
}
}
// helper functions to print a timing indicator to stdout
static int _k_ = 1;
void timing_feedback_header(unsigned n)
{
_k_ = 1;
if (n > 40) {
_k_ = (1 + (n-1)/40);
}
for (int i = 0; i < (n/_k_); i++) {
printf("-");
}
printf("|\n");
}
void timing_feedback_dot(unsigned i)
{
if (0 == (i+1) % _k_) { printf("."); fflush(NULL); }
}
void run_CPU_aux(AGENT_DATA *ag, RESULTS *r)
{
// on entry the agent's theta, eligibility trace, and state values have been initialized
timing_feedback_header(_p.num_chunks);
#ifdef VERBOSE
printf("%d chunks per share\n", _p.chunks_per_share);
#endif
for (int i = 0; i < _p.num_chunks; i++) {
#ifdef VERBOSE
printf("--------------- new chunk [%d]------------------\n", i);
#endif
timing_feedback_dot(i);
if(0 == (i % _p.chunks_per_restart)){
#ifdef VERBOSE
printf("clearing traces ...\n");
#endif
clear_traces(ag);
#ifdef VERBOSE
printf("randomizing state ...\n");
#endif
randomize_all_states_biased(ag);
}
#ifdef VERBOSE
printf("learning session ...\n");
#endif
learning_session(ag);
if ((_p.agent_group_size > 1) && 0 == ((i+1)%_p.chunks_per_share)) {
#ifdef VERBOSE
printf("sharing ...\n");
#endif
share_theta(ag);
reduce_theta_bias(ag, THETA_BIAS_REDUCTION_FACTOR);
}
if (0 == ((i+1)%_p.chunks_per_test)) {
#ifdef VERBOSE
printf("testing...\n");
#endif
r->avg_fail[i/_p.chunks_per_test] = run_test(ag);
}
}
#ifdef DUMP_TERMINAL_AGENT_STATE
printf("\n----------------------------------------------\n");
dump_agents(" ENDING AGENT STATES\n", ag);
#endif
printf("\n");
if (_p.dump1) {
dump_one_agent("----------------------------------------------\n Agent 0 Ending State\n", ag);
}
}
void run_CPU(AGENT_DATA *ag, RESULTS *r)
{
#ifdef VERBOSE
printf("\n==============================================\nrunning on CPU...\n");
#endif
#ifdef DUMP_INITIAL_AGENTS
dump_agents("Initial agents on CPU", ag);
#endif
unsigned timer;
CREATE_TIMER(&timer);
START_TIMER(timer);
run_CPU_aux(ag, r);
STOP_TIMER(timer, "run on CPU");
}
void free_agentsCPU(AGENT_DATA *ag)
{
#ifdef VERBOSE
printf("freeing agents on CPU...\n");
#endif
if (ag) {
if (ag->seeds) free(ag->seeds);
if (ag->theta) free(ag->theta);
if (ag->theta_bias) free(ag->theta_bias);
if (ag->e) free(ag->e);
if (ag->wgt) free(ag->wgt);
if (ag->s) free(ag->s);
if (ag->Q) free(ag->Q);
if (ag->action) free(ag->action);
free(ag);
}
}
#pragma mark -
#pragma mark GPU
AGENT_DATA *copy_GPU_agents()
{
AGENT_DATA *agGPUcopy = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
agGPUcopy->seeds = host_copyui(d_seeds, _p.agents * 4);
agGPUcopy->theta = host_copyf(d_theta, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->theta_bias = host_copyf(d_theta_bias, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->e = host_copyf(d_e, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->wgt = host_copyf(d_wgt, _p.agents * _p.num_features * _p.num_actions);
agGPUcopy->s = host_copyf(d_s, _p.agents * _p.state_size);
agGPUcopy->Q = host_copyf(d_Q, _p.agents * _p.num_actions);
agGPUcopy->action = host_copyui(d_action, _p.agents);
return agGPUcopy;
}
// check if s1[i] and s2[i] within small value of each other
unsigned mismatch(float *s1, float *s2, unsigned i)
{
float small = 1.0e-4;
return s1[i] > (s2[i]+small) || s1[i] < (s2[i]-small);
}
unsigned mismatchui(unsigned *s1, unsigned *s2, unsigned i)
{
unsigned small = 0;
return s1[i] > (s2[i]+small) || s1[i] < (s2[i]-small);
}
// check that the GPU agent information copied from the device is the same as the
// CPU agent information pointed to by last_CPU_agent_dump
void check_agents(AGENT_DATA *agGPUcopy)
{
for (int agent = 0; agent < _p.agents; agent++) {
printf("[agent%4d] ", agent);
unsigned match = 1;
for (int s = 0; s < 4; s++) {
if (mismatchui(agGPUcopy->seeds, last_CPU_agent_dump->seeds, agent + s*_p.agents)){
match = 0;
printf("seed mismatch, ");
break;
}
if (mismatch(agGPUcopy->s, last_CPU_agent_dump->s, agent + s*_p.agents)){
match = 0;
printf("state mismatch, ");
break;
}
}
for (int th = 0; th < _p.num_features * _p.num_actions; th++) {
if (mismatch(agGPUcopy->theta, last_CPU_agent_dump->theta, agent + th*_p.agents)){
match = 0;
printf("theta mismatch feature=%d, action=%d, %f vs %f\n", th/_p.num_actions, th % _p.num_actions, agGPUcopy->theta[agent + th * _p.agents], last_CPU_agent_dump->theta[agent + th * _p.agents]);
// break;
}
if (mismatch(agGPUcopy->e, last_CPU_agent_dump->e, agent + th*_p.agents)){
match = 0;
printf("trace mismatch feature=%d, action=%d\n", th/_p.num_actions, th % _p.num_actions);
// break;
}
}
printf(match ? "match\n" : "\n");
}
}
void dump_agents_GPU(const char *str, unsigned check)
{
AGENT_DATA *agGPUcopy = copy_GPU_agents();
if (check) check_agents(agGPUcopy);
dump_agents(str, agGPUcopy);
free_agentsCPU(agGPUcopy);
}
void dump_one_agent_GPU(const char *str)
{
AGENT_DATA *agGPUcopy = copy_GPU_agents();
dump_one_agent(str, agGPUcopy);
free_agentsCPU(agGPUcopy);
}
/*
Initialize agent data on GPU by copying the CPU data.
Also initialize constant memory pointers to point to the GPU data.
Allocate device memory for:
dc_seeds, dc_theta, dc_theta_bias, dc_e, dc_wgt, dc_s, dc_Q, and dc_action
Device pointers are also stored in host memory: d_seeds, d_theta, d_theta_bias, d_e, d_wgt, d_s, d_Q, and d_action,
which are used to free the device memory.
*/
void initialize_agentsGPU(AGENT_DATA *agCPU)
{
#ifdef VERBOSE
printf("initializing agents on GPU...\n");
#endif
d_seeds = device_copyui(agCPU->seeds, _p.agents * 4);
d_theta = device_copyf(agCPU->theta, _p.agents * _p.num_features * _p.num_actions);
d_theta_bias = device_copyf(agCPU->theta_bias, _p.agents * _p.num_features * _p.num_actions);
d_e = device_copyf(agCPU->e, _p.agents * _p.num_features * _p.num_actions);
d_wgt = device_copyf(agCPU->wgt, _p.agents * _p.num_features * _p.num_actions);
d_s = device_copyf(agCPU->s, _p.agents * _p.state_size);
d_Q = device_copyf(agCPU->Q, _p.agents * _p.num_actions);
d_action = device_copyui(agCPU->action, _p.agents);
cudaMemcpyToSymbol("dc_seeds", &d_seeds, sizeof(unsigned *));
cudaMemcpyToSymbol("dc_theta", &d_theta, sizeof(float *));
cudaMemcpyToSymbol("dc_theta_bias", &d_theta_bias, sizeof(float *));
cudaMemcpyToSymbol("dc_e", &d_e, sizeof(float *));
cudaMemcpyToSymbol("dc_wgt", &d_wgt, sizeof(float *));
cudaMemcpyToSymbol("dc_s", &d_s, sizeof(float *));
cudaMemcpyToSymbol("dc_Q", &d_Q, sizeof(float *));
cudaMemcpyToSymbol("dc_action", &d_action, sizeof(unsigned *));
}
// free all agent data from GPU
void free_agentsGPU()
{
#ifdef VERBOSE
printf("freeing agents on GPU...\n");
#endif
if (d_seeds) cudaFree(d_seeds);
if (d_theta) cudaFree(d_theta);
if (d_theta_bias) cudaFree(d_theta_bias);
if (d_e) cudaFree(d_e);
if (d_wgt) cudaFree(d_wgt);
if (d_s) cudaFree(d_s);
if (d_Q) cudaFree(d_Q);
if (d_action) cudaFree(d_action);
}
/*
copy state information from global device memory to shared memory
assumes stride is BLOCK_SIZE for shared memory and dc_agents for global memory
*/
#define COPY_STATE_TO_SHARED(iLocal, iGlobal) { \
s_s[iLocal] = dc_s[iGlobal]; \
s_s[iLocal + BLOCK_SIZE] = dc_s[iGlobal + dc_agents]; \
s_s[iLocal + 2*BLOCK_SIZE] = dc_s[iGlobal + 2*dc_agents]; \
s_s[iLocal + 3*BLOCK_SIZE] = dc_s[iGlobal + 3*dc_agents]; \
s_action[iLocal] = dc_action[iGlobal]; \
s_Q[iLocal] = dc_Q[iGlobal]; \
s_Q[iLocal + BLOCK_SIZE] = dc_Q[iGlobal + dc_agents]; \
}
#define COPY_STATE_TO_GLOBAL(iLocal, iGlobal) { \
dc_s[iGlobal] = s_s[iLocal]; \
dc_s[iGlobal + dc_agents] = s_s[iLocal + BLOCK_SIZE]; \
dc_s[iGlobal + 2*dc_agents] = s_s[iLocal + 2*BLOCK_SIZE]; \
dc_s[iGlobal + 3*dc_agents] = s_s[iLocal + 3*BLOCK_SIZE]; \
dc_action[iGlobal] = s_action[iLocal]; \
dc_Q[iGlobal] = s_Q[iLocal]; \
dc_Q[iGlobal + dc_agents] = s_Q[iLocal + BLOCK_SIZE]; \
}
/*
* Calculate average thetas for each feature/action value for the entire group and share with
* all threads in the group
* The group's y dimension is the feature/action index.
* Shared memory is used to do the reduction to get total values for the group.
*/
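// Shared memory holds two blockDim.x-float arrays: s_theta accumulates theta * wgt and
// s_wgt accumulates wgt. A tree reduction leaves the group totals in element 0, from which
// the weighted-average theta is computed and broadcast back to global memory.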
__global__ void pole_share_kernel(unsigned numShareBlocks)
{
unsigned idx = threadIdx.x;
unsigned fa = blockIdx.y;
unsigned iGlobal = idx + blockIdx.x * dc_agent_group_size + fa * dc_agents;
// copy thetas and wgts to shared memory, converting theta to theta x wgt
extern __shared__ float s_theta[];
float *s_wgt = s_theta + blockDim.x;
s_wgt[idx] = dc_wgt[iGlobal];
s_theta[idx] = dc_theta[iGlobal] * s_wgt[idx]; // remove bias
// repeat the process if there are more than one share blocks to be reduced
for (int i = 1; i < numShareBlocks; i++) {
unsigned iG = iGlobal + i * blockDim.x;
s_wgt[idx] += dc_wgt[iG];
s_theta[idx] += dc_theta[iG] * dc_wgt[iG];
}
__syncthreads();
// do a reduction on theta for this group
for (unsigned half = blockDim.x >> 1; half > 0; half >>= 1) {
if (idx < half) {
s_theta[idx] += s_theta[idx + half];
s_wgt[idx] += s_wgt[idx + half];
}
__syncthreads();
}
// copy the values at index 0 to all threads
// **TODO** rearrange to only do all calculations when s_wgt[0] > 0.0f
float new_theta = 0.0f;
if (s_wgt[0] > 0.0f) new_theta = s_theta[0] / s_wgt[0];
for (int i = 0; i < numShareBlocks; i++) {
unsigned iG = iGlobal + i * blockDim.x;
if (s_wgt[0] > 0.0f) dc_theta[iG] = new_theta;
dc_wgt[iG] = dc_initial_sharing_wgt;
}
// **-------------
}
/*
set all eligibility trace values to 0.0f
*/
__global__ void pole_clear_trace_kernel()
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
if (iGlobal < dc_num_featuresXactionsXagents) dc_e[iGlobal] = 0.0f;
}
/*
multiply all theta bias values by a factor, k
*/
__global__ void pole_reduce_bias_kernel(float k)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
if (iGlobal < dc_num_featuresXactionsXagents) dc_theta_bias[iGlobal] *= k;
}
/*
Do a learning session for specified number of steps.
On entry, the theta values are valid from prior learning episodes.
First, randomize the state if this is a restart,
Then repeat the learning process for specified number of iterations
Ending state is saved.
Chooses an action based on biased theta values
*/
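// Each thread keeps its agent's state (4 floats), chosen action and the 2 per-action Q
// values in shared memory, strided by BLOCK_SIZE, and copies them back to global memory
// when the chunk of time steps is done.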
__global__ void pole_learn_kernel(unsigned steps, unsigned isRestart)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
unsigned idx = threadIdx.x;
if (iGlobal >= dc_agents) return;
__shared__ float s_s[4 * BLOCK_SIZE];
__shared__ unsigned s_action[BLOCK_SIZE];
__shared__ float s_Q[2*BLOCK_SIZE];
if (isRestart) {
// randomize state, determine first action and update eligibility trace
randomize_stateGPU(s_s + idx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = choose_action_biasedGPU(s_s + idx, dc_theta + iGlobal, dc_theta_bias + iGlobal, s_Q + idx, dc_seeds + iGlobal, feature);
// s_Q contains Q values for each action from the current state
// s_action contains the chosen action to be taken from the current state
update_traceGPU(s_action[idx], s_s + idx, dc_e + iGlobal, feature);
} else COPY_STATE_TO_SHARED(idx, iGlobal);
// loop through specified number of time steps
float *s_sidx = s_s + idx;
float *s_Qidx = s_Q + idx;
for (int t = 0; t < steps; t++) {
// take the action stored in s_action
float reward = take_action(s_action[idx], s_sidx, s_sidx, BLOCK_SIZE);
unsigned fail = (reward == REWARD_FAIL);
if (fail) randomize_stateGPU(s_sidx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_sidx, BLOCK_SIZE);
// now may be in a different state
float Q_a = s_Q[idx + s_action[idx] * BLOCK_SIZE];
s_action[idx] = choose_action_biasedGPU(s_sidx, dc_theta + iGlobal, dc_theta_bias + iGlobal, s_Qidx, dc_seeds + iGlobal, feature);
float Q_a_prime = s_Q[idx + s_action[idx] * BLOCK_SIZE];
float delta = reward - Q_a + (fail ? 0 : dc_gamma * Q_a_prime);
update_thetasGPU(dc_theta + iGlobal, dc_e + iGlobal, dc_wgt + iGlobal, delta);
if (fail) reset_traceGPU(dc_e + iGlobal);
update_stored_QGPU(s_Qidx, s_sidx, dc_theta + iGlobal, feature);
update_traceGPU(s_action[idx], s_sidx, dc_e + iGlobal, feature);
}
COPY_STATE_TO_GLOBAL(idx, iGlobal);
}
__global__ void pole_test_kernel(float *results)
{
unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
unsigned idx = threadIdx.x;
if (iGlobal >= dc_agents) return;
__shared__ float s_s[4 * BLOCK_SIZE];
__shared__ unsigned s_action[BLOCK_SIZE];
__shared__ float s_Q[2*BLOCK_SIZE];
randomize_stateGPU(s_s + idx, dc_seeds + iGlobal);
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = best_actionGPU(s_s + idx, dc_theta + iGlobal, s_Q + idx, feature);
// run the test using shared memory
float *s_sidx = s_s + idx;
float *s_Qidx = s_Q + idx;
int t = 0;
for (t = 0; t < dc_test_reps; t++) {
take_action(s_action[idx], s_sidx, s_sidx, BLOCK_SIZE);
if (terminal_state(s_sidx, BLOCK_SIZE)) {
break;
}
unsigned feature = feature_for_state(s_s + idx, BLOCK_SIZE);
s_action[idx] = best_actionGPU(s_sidx, dc_theta + iGlobal, s_Qidx, feature);
}
results[iGlobal] = t;
}
void run_GPU(RESULTS *r)
{
#ifdef VERBOSE
printf("\n==============================================\nRunning on GPU...\n");
#endif
// on entry the device constant pointers have been initialized to agent's theta,
// eligibility trace, and state values
#ifdef DUMP_INITIAL_AGENTS
dump_agents_GPU("initial agents on GPU", 0);
#endif
// setup constant memory on device
set_constant_params(_p);
// allocate an array to hold individual thread test results
float *d_results = device_allocf(_p.agents * _p.num_tests);
// one thread for each agent in each trial
dim3 blockDim(BLOCK_SIZE);
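// older (pre-Kepler) devices limit gridDim.x to 65535, so a large 1-D grid is folded into
// 2-D; the kernels recover the linear block index as blockIdx.x + blockIdx.y * gridDim.x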
dim3 gridDim(1 + (_p.agents - 1) / BLOCK_SIZE);
if (gridDim.x > 65535){
gridDim.y = 1 + (gridDim.x-1) / 65535;
gridDim.x = 1 + (gridDim.x-1) / gridDim.y;
}
dim3 clearTraceBlockDim(512);
dim3 clearTraceGridDim(1 + (_p.agents * _p.num_features * _p.num_actions - 1) / 512);
if (clearTraceGridDim.x > 65535) {
clearTraceGridDim.y = 1 + (clearTraceGridDim.x-1) / 65535;
clearTraceGridDim.x = 1 + (clearTraceGridDim.x-1) / clearTraceGridDim.y;
}
// calculate a multiplier in case the agent group size is more than 512
unsigned numShareBlocks = 1;
unsigned shareBlockSize = _p.agent_group_size;
if (shareBlockSize > 512) {
numShareBlocks = shareBlockSize / 512;
shareBlockSize = 512;
}
dim3 shareBlockDim(shareBlockSize);
dim3 shareGridDim(_p.trials, _p.num_features * _p.num_actions);
#ifdef VERBOSE
printf("%d total agents\n", _p.agents);
printf("%d threads per block, (%d x %d) grid of blocks\n", blockDim.x, gridDim.x, gridDim.y);
printf("for sharing: %d threads per block, (%d x %d) grid of blocks\n", shareBlockDim.x, shareGridDim.x, shareGridDim.y);
printf("for clearing trace: %d threads per block, (%d x %d) grid of blocks\n",
clearTraceBlockDim.x, clearTraceGridDim.x, clearTraceGridDim.y);
#endif
float timeClear = 0.0f;
float timeLearn = 0.0f;
float timeShare = 0.0f;
float timeTest = 0.0f;
float timeReduce = 0.0f;
unsigned timerCPU;
CREATE_TIMER(&timerCPU);
START_TIMER(timerCPU);
CUDA_EVENT_PREPARE;
#ifdef VERBOSE
printf("chunk interval is %d and there are %d chunks in the total time steps of %d\n",
_p.chunk_interval, _p.num_chunks, _p.time_steps);
printf(" restart interval is %d which is %d chunks\n", _p.restart_interval, _p.chunks_per_restart);
printf(" sharing interval is %d which is %d chunks\n", _p.sharing_interval, _p.chunks_per_share);
printf(" testing interval is %d which is %d chunks\n", _p.test_interval, _p.chunks_per_test);
#endif
timing_feedback_header(_p.num_chunks);
for (int i = 0; i < _p.num_chunks; i++) {
timing_feedback_dot(i);
#ifdef VERBOSE
printf("--------------- new chunk [%d]------------------\n", i);
#endif
unsigned isRestart = (0 == (i % _p.chunks_per_restart));
if(isRestart){
// reset traces
CUDA_EVENT_START
pole_clear_trace_kernel<<<clearTraceGridDim, clearTraceBlockDim>>>();
CUDA_EVENT_STOP(timeClear);
CUT_CHECK_ERROR("pole_clear_trace_kernel execution failed");
}
// always do learning for this chunk of time
CUDA_EVENT_START
pole_learn_kernel<<<gridDim, blockDim>>>(_p.chunk_interval, isRestart);
CUDA_EVENT_STOP(timeLearn);
CUT_CHECK_ERROR("pole_learn_kernel execution failed");
if ((_p.agent_group_size > 1) && (0 == ((i+1) % _p.chunks_per_share))) {
CUDA_EVENT_START;
pole_share_kernel<<<shareGridDim, shareBlockDim, 2*shareBlockDim.x * sizeof(float)>>>(numShareBlocks);
CUDA_EVENT_STOP(timeShare);
CUT_CHECK_ERROR("pole_share_kernel execution failed");
CUDA_EVENT_START;
pole_reduce_bias_kernel<<<clearTraceGridDim, clearTraceBlockDim>>>(THETA_BIAS_REDUCTION_FACTOR);
CUDA_EVENT_STOP(timeClear);
CUT_CHECK_ERROR("pole_reduce_bias_kernel execution failed");
}
if (0 == ((i+1) % _p.chunks_per_test)) {
CUDA_EVENT_START;
pole_test_kernel<<<gridDim, blockDim>>>(d_results + (i / _p.chunks_per_test) * _p.agents);
CUDA_EVENT_STOP(timeTest);
CUT_CHECK_ERROR("pole_test_kernel execution failed");
}
}
printf("\n");
// reduce the result array on the device and copy back to the host
CUDA_EVENT_START;
row_reduce(d_results, _p.agents, _p.num_tests);
for (int i = 0; i < _p.num_tests; i++) {
CUDA_SAFE_CALL(cudaMemcpy(r->avg_fail + i, d_results + i * _p.agents, sizeof(float),
cudaMemcpyDeviceToHost));
r->avg_fail[i] /= _p.agents;
}
CUDA_EVENT_STOP(timeReduce);
CUDA_EVENT_CLEANUP;
STOP_TIMER(timerCPU, "total GPU time");
PRINT_TIME(timeClear, "pole_clear_trace_kernel");
PRINT_TIME(timeLearn, "pole_learn_kernel");
PRINT_TIME(timeShare, "pole_share_kernel");
PRINT_TIME(timeTest, "pole_test_kernel");
PRINT_TIME(timeReduce, "pole_reduce_kernel");
#ifdef DUMP_TERMINAL_AGENT_STATE
dump_agents_GPU("--------------------------------------\n Ending Agent States\n", 0);
#endif
if (_p.dump1) {
dump_one_agent_GPU("----------------------------------------------\n Agent 0 Ending State\n");
}
if (d_results) cudaFree(d_results);
}
|
d2b3bf101eef7b50a10946b8bf1f573d71bb6aa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
//_1_: kernel to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
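// grid-stride loop: each thread starts at its global index and advances by the total
// number of launched threads, so any N is covered regardless of the grid size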
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20; // 1M elements
// float *x = new float[N];
// float *y = new float[N];
//_2_: Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
// add(N, x, y);
//_3_: launch kernel running on GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
//_4_: synchronization
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
//_2_: Free memory
// delete [] x;
// delete [] y;
hipFree(x);
hipFree(y);
return 0;
}
| d2b3bf101eef7b50a10946b8bf1f573d71bb6aa5.cu | #include <iostream>
#include <math.h>
//_1_: kernel to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20; // 1M elements
// float *x = new float[N];
// float *y = new float[N];
//_2_: Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
// add(N, x, y);
//_3_: launch kernel running on GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
//_4_: synchronization
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
//_2_: Free memory
// delete [] x;
// delete [] y;
cudaFree(x);
cudaFree(y);
return 0;
}
|
74a88af1727a3bc109eea8c5352fb5b8de4ee7e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is machine problem 2, part 2: brute force k nearest neighbors
* You are given a large number of particles, and are asked
* to find the k particles that are nearest to each one.
* Look at the example in /tutorials/thread_local_variables.cu
* for how you can use per thread arrays for sorting.
* Using that example, port the cpu reference code to the gpu in a first step.
* In a second step, modify your code so that the per-thread arrays are in
* shared memory. You should submit this second version of your code.
*/
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit the assignment from any of the cluster machines by using
* our submit script. The submit script bundles the entire current directory into
* a submission. Thus, you use it by CDing to the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <cassert>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
inline __device__ __host__ float3 operator -(float3 a, float3 b)
{
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
}
__host__ __device__
float dist2(float3 a, float3 b)
{
float3 d = a - b;
float d2 = d.x*d.x + d.y*d.y + d.z*d.z;
return d2;
}
template
<typename T>
__host__ __device__
void init_list(T *base_ptr, unsigned int size, T val)
{
for(int i=0;i<size;i++)
{
base_ptr[i] = val;
}
}
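// Insert (dist, id) into the fixed-size lists kept sorted by ascending distance: larger
// entries shift right and the last one falls off, so only the k closest survive.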
__host__ __device__
void insert_list(float *dist_list, int *id_list, int size, float dist, int id)
{
int k;
for (k=0; k < size; k++) {
if (dist < dist_list[k]) {
// we should insert it in here, so push back and make it happen
for (int j = size - 1; j > k ; j--) {
dist_list[j] = dist_list[j-1];
id_list[j] = id_list[j-1];
}
dist_list[k] = dist;
id_list[k] = id;
break;
}
}
}
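// CPU reference: O(N^2) brute force. For each particle, scan every other particle and keep
// the num_neighbors smallest squared distances via insert_list.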
template
<int num_neighbors>
void host_find_knn(float3 *particles, int *knn, int array_length)
{
for(int i=0;i<array_length;i++)
{
float3 p = particles[i];
float neigh_dist[num_neighbors];
int neigh_ids[num_neighbors];
init_list(&neigh_dist[0],num_neighbors,2.0f);
init_list(&neigh_ids[0],num_neighbors,-1);
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j);
}
}
for(int j=0;j<num_neighbors;j++)
{
knn[num_neighbors*i + j] = neigh_ids[j];
}
}
}
__global__ void device_find_knn_local(float3 *particles, int *knn, int array_length)
{
const int NUM_NEIGHBORS=5;
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
float3 p = particles[i];
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0], &neigh_ids[0], NUM_NEIGHBORS, rsq, j);
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
knn[NUM_NEIGHBORS*i + j] = neigh_ids[j];
}
}
__global__ void device_find_knn_shared(float3* particles, int *knn, int array_length)
{
const int NUM_NEIGHBORS =5;
const int block_size = 512;
__shared__ float neigh_dist[block_size*NUM_NEIGHBORS];
__shared__ int neigh_ids[block_size*NUM_NEIGHBORS];
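// each thread owns a private NUM_NEIGHBORS-long slice of the shared arrays at offset
// threadIdx.x * NUM_NEIGHBORS; slices are never shared between threads, so no
// __syncthreads() is needed (note the local block_size constant must match the launch
// block size of 512)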
init_list(&neigh_dist[0]+threadIdx.x*NUM_NEIGHBORS,NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0]+threadIdx.x*NUM_NEIGHBORS,NUM_NEIGHBORS,-1);
// __syncthreads();
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
float3 p = particles[i];
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0]+threadIdx.x*NUM_NEIGHBORS, &neigh_ids[0]+threadIdx.x*NUM_NEIGHBORS, NUM_NEIGHBORS, rsq, j);
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
knn[NUM_NEIGHBORS*i + j] = neigh_ids[j+threadIdx.x*NUM_NEIGHBORS];
}
}
void allocate_host_memory(int num_particles, int num_neighbors,
float3 *&h_particles, int *&h_knn, int *&h_knn_checker)
{
// malloc host array
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_knn = (int*)malloc(num_particles * num_neighbors * sizeof(int));
h_knn_checker = (int*)malloc(num_particles * num_neighbors * sizeof(int));
// if either memory allocation failed, report an error message
if(h_particles == 0 || h_knn == 0 || h_knn_checker == 0)
{
printf("couldn't allocate host memory\n");
exit(1);
}
}
void allocate_device_memory(int num_particles, int num_neighbors,
float3 *&d_particles, int *&d_knn)
{
// TODO: your device memory allocations here
hipMalloc((void**)&d_particles,num_particles*sizeof(float3));
hipMalloc((void**)&d_knn,num_particles*num_neighbors*sizeof(int));
// TODO: don't forget to check for errors
if(d_particles ==0 || d_knn ==0)
{
printf("couldnt allocated device memory\n");
exit(1);
}
}
void deallocate_host_memory(float3 *h_particles, int *h_knn, int *h_knn_checker)
{
free(h_particles);
free(h_knn);
free(h_knn_checker);
}
void deallocate_device_memory(float3 *d_particles, int *d_knn)
{
// TODO: your device memory deallocations here
hipFree(d_particles);
hipFree(d_knn);
// TODO: don't forget to check for errors
}
bool cross_check_results(int * reference_knn, int * knn, int num_particles, int num_neighbors)
{
int error = 0;
for(int i=0;i<num_particles;i++)
{
for(int j=0;j<num_neighbors;j++)
{
if(reference_knn[i*num_neighbors + j] != knn[i*num_neighbors + j])
{
if(print_debug) printf("particle %d, neighbor %d is %d on cpu, %d on gpu\n",i,j,reference_knn[i*num_neighbors + j],knn[i*num_neighbors + j]);
error = 1;
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
int main(void)
{
// create arrays of 20K particles
int num_particles = 20*1024;
const int num_neighbors = 5;
// pointers to host arrays
float3 *h_particles = 0;
int *h_knn = 0;
int *h_knn_checker = 0;
// pointers to device arrays
float3 *d_particles = 0;
int *d_knn = 0;
allocate_host_memory(num_particles, num_neighbors, h_particles, h_knn, h_knn_checker);
allocate_device_memory(num_particles, num_neighbors, d_particles, d_knn);
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
// copy input to GPU
start_timer(&timer);
// TODO: your copy of input from host to device here
hipMemcpy(d_particles,h_particles,num_particles*sizeof(float3),hipMemcpyHostToDevice);
// hipMemcpy(d_knn,h_knn,num_particles*num_neighbors*sizeof(int),hipMemcpyHostToDevice);
stop_timer(&timer,"copy to gpu");
const size_t block_size = 512;
const size_t num_blocks = num_particles/block_size;
start_timer(&timer);
// TODO: your kernel launch which uses local memory here
hipLaunchKernelGGL(( device_find_knn_local), dim3(num_blocks),dim3(block_size), 0, 0, d_particles, d_knn, num_particles) ;
check_cuda_error("brute force knn");
stop_timer(&timer,"brute force knn");
start_timer(&timer);
// TODO: your kernel launch which uses __shared__ memory here
hipLaunchKernelGGL(( device_find_knn_shared), dim3(num_blocks),dim3(block_size), 0, 0, d_particles, d_knn, num_particles) ;
check_cuda_error("shared meme knn");
stop_timer(&timer,"shared mem knn");
// download and inspect the result on the host
start_timer(&timer);
// TODO: your copy of results from device to host here
hipMemcpy(h_knn,d_knn,num_particles*num_neighbors*sizeof(int),hipMemcpyDeviceToHost);
check_cuda_error("copy from gpu");
stop_timer(&timer,"copy back from gpu memory");
// generate reference output
start_timer(&timer);
host_find_knn<num_neighbors>(h_particles, h_knn_checker, num_particles);
stop_timer(&timer,"cpu brute force knn");
// check CUDA output versus reference output
cross_check_results(h_knn_checker, h_knn, num_particles, num_neighbors);
deallocate_host_memory(h_particles, h_knn, h_knn_checker);
deallocate_device_memory(d_particles, d_knn);
return 0;
}
| 74a88af1727a3bc109eea8c5352fb5b8de4ee7e5.cu | /* This is machine problem 2, part 2: brute force k nearest neighbors
* You are given a large number of particles, and are asked
* to find the k particles that are nearest to each one.
* Look at the example in /tutorials/thread_local_variables.cu
 * for how you can use per-thread arrays for sorting.
 * Using that example, port the CPU reference code to the GPU in a first step.
* In a second step, modify your code so that the per-thread arrays are in
* shared memory. You should submit this second version of your code.
*/
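/* A minimal sketch of the two steps being asked for (K and BLOCK_SIZE stand in for
 * NUM_NEIGHBORS and the launch block size; the full versions are
 * device_find_knn_local and device_find_knn_shared below):
 *
 *   // step 1: per-thread local arrays, private to each thread
 *   float neigh_dist[K]; int neigh_ids[K];
 *
 *   // step 2: the same lists carved out of one per-block __shared__ buffer
 *   __shared__ float block_dist[BLOCK_SIZE * K];
 *   float *my_dist = block_dist + threadIdx.x * K;   // this thread's slice
 */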
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit the assignment from any of the cluster machines by using
 * our submit script. The submit script bundles the entire current directory into
 * a submission. Thus, you use it by CDing to the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <cassert>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
inline __device__ __host__ float3 operator -(float3 a, float3 b)
{
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
}
__host__ __device__
float dist2(float3 a, float3 b)
{
float3 d = a - b;
float d2 = d.x*d.x + d.y*d.y + d.z*d.z;
return d2;
}
template
<typename T>
__host__ __device__
void init_list(T *base_ptr, unsigned int size, T val)
{
for(int i=0;i<size;i++)
{
base_ptr[i] = val;
}
}
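// insert_list keeps dist_list/id_list sorted by ascending distance: when a new distance
// beats an existing entry, the entries behind it shift back one slot and the farthest
// candidate falls off the end. E.g. with size == 3 and dist_list = {0.1, 0.4, 0.9},
// inserting dist = 0.2 leaves {0.1, 0.2, 0.4}.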
__host__ __device__
void insert_list(float *dist_list, int *id_list, int size, float dist, int id)
{
int k;
for (k=0; k < size; k++) {
if (dist < dist_list[k]) {
// we should insert it in here, so push back and make it happen
for (int j = size - 1; j > k ; j--) {
dist_list[j] = dist_list[j-1];
id_list[j] = id_list[j-1];
}
dist_list[k] = dist;
id_list[k] = id;
break;
}
}
}
template
<int num_neighbors>
void host_find_knn(float3 *particles, int *knn, int array_length)
{
for(int i=0;i<array_length;i++)
{
float3 p = particles[i];
float neigh_dist[num_neighbors];
int neigh_ids[num_neighbors];
init_list(&neigh_dist[0],num_neighbors,2.0f);
init_list(&neigh_ids[0],num_neighbors,-1);
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j);
}
}
for(int j=0;j<num_neighbors;j++)
{
knn[num_neighbors*i + j] = neigh_ids[j];
}
}
}
__global__ void device_find_knn_local(float3 *particles, int *knn, int array_length)
{
const int NUM_NEIGHBORS=5;
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
float3 p = particles[i];
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0], &neigh_ids[0], NUM_NEIGHBORS, rsq, j);
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
knn[NUM_NEIGHBORS*i + j] = neigh_ids[j];
}
}
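// Same brute-force search, but the per-thread neighbor lists live in a single __shared__
// buffer indexed by threadIdx.x. Note the hard-coded block_size of 512 below: it must
// match the block size used at launch, otherwise threads would index past their slice.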
__global__ void device_find_knn_shared(float3* particles, int *knn, int array_length)
{
const int NUM_NEIGHBORS =5;
const int block_size = 512;
__shared__ float neigh_dist[block_size*NUM_NEIGHBORS];
__shared__ int neigh_ids[block_size*NUM_NEIGHBORS];
init_list(&neigh_dist[0]+threadIdx.x*NUM_NEIGHBORS,NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0]+threadIdx.x*NUM_NEIGHBORS,NUM_NEIGHBORS,-1);
// __syncthreads();
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
float3 p = particles[i];
for(int j=0;j<array_length;j++)
{
if(i != j)
{
float rsq = dist2(p,particles[j]);
insert_list(&neigh_dist[0]+threadIdx.x*NUM_NEIGHBORS, &neigh_ids[0]+threadIdx.x*NUM_NEIGHBORS, NUM_NEIGHBORS, rsq, j);
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
knn[NUM_NEIGHBORS*i + j] = neigh_ids[j+threadIdx.x*NUM_NEIGHBORS];
}
}
void allocate_host_memory(int num_particles, int num_neighbors,
float3 *&h_particles, int *&h_knn, int *&h_knn_checker)
{
// malloc host array
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_knn = (int*)malloc(num_particles * num_neighbors * sizeof(int));
h_knn_checker = (int*)malloc(num_particles * num_neighbors * sizeof(int));
// if either memory allocation failed, report an error message
if(h_particles == 0 || h_knn == 0 || h_knn_checker == 0)
{
printf("couldn't allocate host memory\n");
exit(1);
}
}
void allocate_device_memory(int num_particles, int num_neighbors,
float3 *&d_particles, int *&d_knn)
{
// TODO: your device memory allocations here
cudaMalloc((void**)&d_particles,num_particles*sizeof(float3));
cudaMalloc((void**)&d_knn,num_particles*num_neighbors*sizeof(int));
// TODO: don't forget to check for errors
if(d_particles ==0 || d_knn ==0)
{
printf("couldnt allocated device memory\n");
exit(1);
}
}
void deallocate_host_memory(float3 *h_particles, int *h_knn, int *h_knn_checker)
{
free(h_particles);
free(h_knn);
free(h_knn_checker);
}
void deallocate_device_memory(float3 *d_particles, int *d_knn)
{
// TODO: your device memory deallocations here
cudaFree(d_particles);
cudaFree(d_knn);
// TODO: don't forget to check for errors
}
bool cross_check_results(int * reference_knn, int * knn, int num_particles, int num_neighbors)
{
int error = 0;
for(int i=0;i<num_particles;i++)
{
for(int j=0;j<num_neighbors;j++)
{
if(reference_knn[i*num_neighbors + j] != knn[i*num_neighbors + j])
{
if(print_debug) printf("particle %d, neighbor %d is %d on cpu, %d on gpu\n",i,j,reference_knn[i*num_neighbors + j],knn[i*num_neighbors + j]);
error = 1;
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
int main(void)
{
// create an array of 20K particles (num_particles = 20*1024)
int num_particles = 20*1024;
const int num_neighbors = 5;
// pointers to host arrays
float3 *h_particles = 0;
int *h_knn = 0;
int *h_knn_checker = 0;
// pointers to device arrays
float3 *d_particles = 0;
int *d_knn = 0;
allocate_host_memory(num_particles, num_neighbors, h_particles, h_knn, h_knn_checker);
allocate_device_memory(num_particles, num_neighbors, d_particles, d_knn);
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
// copy input to GPU
start_timer(&timer);
// TODO: your copy of input from host to device here
cudaMemcpy(d_particles,h_particles,num_particles*sizeof(float3),cudaMemcpyHostToDevice);
// cudaMemcpy(d_knn,h_knn,num_particles*num_neighbors*sizeof(int),cudaMemcpyHostToDevice);
stop_timer(&timer,"copy to gpu");
const size_t block_size = 512;
const size_t num_blocks = num_particles/block_size;
start_timer(&timer);
// TODO: your kernel launch which uses local memory here
device_find_knn_local<<<num_blocks,block_size>>>(d_particles, d_knn, num_particles) ;
check_cuda_error("brute force knn");
stop_timer(&timer,"brute force knn");
start_timer(&timer);
// TODO: your kernel launch which uses __shared__ memory here
device_find_knn_shared<<<num_blocks,block_size>>>(d_particles, d_knn, num_particles) ;
check_cuda_error("shared meme knn");
stop_timer(&timer,"shared mem knn");
// download and inspect the result on the host
start_timer(&timer);
// TODO: your copy of results from device to host here
cudaMemcpy(h_knn,d_knn,num_particles*num_neighbors*sizeof(int),cudaMemcpyDeviceToHost);
check_cuda_error("copy from gpu");
stop_timer(&timer,"copy back from gpu memory");
// generate reference output
start_timer(&timer);
host_find_knn<num_neighbors>(h_particles, h_knn_checker, num_particles);
stop_timer(&timer,"cpu brute force knn");
// check CUDA output versus reference output
cross_check_results(h_knn_checker, h_knn, num_particles, num_neighbors);
deallocate_host_memory(h_particles, h_knn, h_knn_checker);
deallocate_device_memory(d_particles, d_knn);
return 0;
}
|
f979e1bd4d1c281000d5df94988be0560f4ee5d7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <assert.h>
#include <float.h>
#include "blas.h"
#include "dark_cuda.h"
#include "utils.h"
#include "tree.h"
__inline__ __device__
float warpAllReduceSum(float val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
val += __shfl_xor_sync(0xffffffff, val, mask);
#else
val += __shfl_xor(val, mask);
#endif
return val;
}
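// Butterfly (XOR) reduction across one warp: after log2(WARP_SIZE) exchanges every lane
// holds the sum of all lanes' inputs, e.g.
//   float total = warpAllReduceSum(my_partial); // same value in every lane of the warp
// Used further down by the shortcut weight-update and gradient-centralization kernels.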
__global__ void compare_2_arrays_kernel(float *one, float *two, int size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size) return;
const float diff = 100 * fabs(one[index] - two[index]) / fabs(one[index]);
if (diff > 10) printf(" i: %d - one = %f, two = %f, diff = %f %% \n", index, one[index], two[index], diff);
}
void compare_2_arrays_gpu(float *one, float *two, int size)
{
const int num_blocks = get_number_of_blocks(size, BLOCK);
compare_2_arrays_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(one, two, size);
CHECK_CUDA(hipPeekAtLastError());
CHECK_CUDA(hipDeviceSynchronize());
}
__global__ void mean_array_kernel(float *src, int size, float alpha, float *avg)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= size) return;
avg[i] = avg[i] * (1 - alpha) + src[i] * alpha;
src[i] = avg[i];
}
void mean_array_gpu(float *src, int size, float alpha, float *avg)
{
const int num_blocks = get_number_of_blocks(size, BLOCK);
mean_array_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(src, size, alpha, avg);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void scale_bias_kernel(float *output, float *scale, int batch, int filters, int spatial, int current_size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= current_size) return;
int f = (index / spatial) % filters;
output[index] *= scale[f];
}
void scale_bias_gpu(float *output, float *scale, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
scale_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(output, scale, batch, filters, spatial, current_size);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, get_cuda_stream() , x_norm, delta, batch, n, size, scale_updates);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int filters, int spatial, int current_size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= current_size) return;
int f = (index / spatial) % filters;
output[index] += biases[f];
}
void add_bias_gpu(float *output, float *biases, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
add_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(output, biases, batch, filters, spatial, current_size);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK, 0, get_cuda_stream()>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
*/
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, get_cuda_stream() , bias_updates, delta, batch, n, size);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
adam_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, x, m, v, B1, B2, rate, eps, t);
CHECK_CUDA(hipPeekAtLastError());
}
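// adam_update_gpu below composes the BLAS-style kernels in this file into one Adam step,
// with d holding the accumulated gradient (zeroed at the end) and g = d - decay*batch*w:
//   m = B1*m + (1-B1)*g
//   v = B2*v + (1-B2)*g^2
//   w += rate * (m / (1-B1^t)) / (sqrt(v / (1-B2^t)) + eps)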
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_ongpu(n, B1, m, 1);
scal_ongpu(n, B2, v, 1);
axpy_ongpu(n, -decay*batch, w, 1, d, 1);
axpy_ongpu(n, (1 - B1), d, 1, m, 1);
mul_ongpu(n, d, 1, d, 1);
axpy_ongpu(n, (1 - B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_ongpu(n, 0, d, 1);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
x[index] = (x[index] - mean[f]) / (sqrtf(variance[f] + .00001f));
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
normalize_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(current_size, x, mean, variance, batch, filters, spatial);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.F/(sqrtf(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5 * powf(variance[i] + .000001f, (float)(-3./2.));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.F/sqrtf(variance[filter] + .000001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3./2.));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.F/sqrtf(variance[i] + .000001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream() , delta, variance, batch, filters, spatial, mean_delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream() , delta, variance, batch, filters, spatial, mean_delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream() , x, delta, mean, variance, batch, filters, spatial, variance_delta);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.F/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.F/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void constrain_weight_updates_kernel(int N, float coef, float *weights_gpu, float *weight_updates_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) {
const float w = weights_gpu[i];
const float wu = weight_updates_gpu[i];
const float wu_sign = (wu == 0) ? 0 : (fabs(wu) / wu);
const float abs_limit = fabs(w * coef);
if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign;
}
}
extern "C" void constrain_weight_updates_ongpu(int N, float coef, float *weights_gpu, float *weight_updates_gpu)
{
constrain_weight_updates_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, coef, weights_gpu, weight_updates_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = powf(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void constrain_min_max_kernel(int N, float MIN, float MAX, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = fminf(MAX, fmaxf(MIN, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void scal_add_kernel(int N, float ALPHA, float BETA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = X[i*INCX] * ALPHA + BETA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
X[index*INCX] = ALPHA;
}
__global__ void mask_kernel_new_api(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n && mask[i] == mask_num) x[i] = val;
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = mask_num;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void simple_copy_kernel(int size, float *src, float *dst)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size)
dst[index] = src[index];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
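// fast_mean_kernel / fast_variance_kernel: one thread block per filter. Each thread
// accumulates a strided partial sum over that filter's batch*spatial elements into
// shared memory, and thread 0 finishes the reduction, giving one mean/variance per filter.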
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
float mean_tmp = 0;
for(i = 0; i < threads; ++i){
mean_tmp += local[i];
}
mean_tmp /= spatial * batch;
mean[filter] = mean_tmp;
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
fast_mean_kernel << <filters, BLOCK, 0, get_cuda_stream() >> >(x, batch, filters, spatial, mean);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
float variance_tmp = 0;
for(i = 0; i < threads; ++i){
variance_tmp += local[i];
}
variance_tmp /= (spatial * batch);// -1);
variance[filter] = variance_tmp;
}
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream() , x, mean, batch, filters, spatial, variance);
CHECK_CUDA(hipPeekAtLastError());
}
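// Cross-iteration batch norm (CBN) statistics, roughly: each filter computes E[x^2] for
// the current minibatch, then folds mean and E[x^2] into running buffers with weight
// a = 1/minibatch_index (m_avg = a*mean + (1-a)*m_avg, v_avg = a*E[x^2] + (1-a)*v_avg).
// The reported variance is max(0, v_avg - m_avg^2), returned as 1/sqrt(var + epsilon)
// when inverse_variance is set; rolling_mean/rolling_variance are updated with alpha.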
__global__ void fast_v_cbn_kernel(const float *x, float *mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float *m_avg, float *v_avg, float *variance,
const float alpha, float *rolling_mean_gpu, float *rolling_variance_gpu, int inverse_variance, float epsilon)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for (j = 0; j < batch; ++j) {
for (i = 0; i < spatial; i += threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i + id < spatial) ? powf(x[index], 2) : 0;
}
}
__syncthreads();
if (id == 0) {
float v_tmp = 0;
v_tmp = 0;
for (i = 0; i < threads; ++i) {
v_tmp += local[i];
}
v_tmp /= (spatial * batch - 1);
v_tmp = fmax(v_tmp, powf(mean[filter], 2));
const float alpha_cbn = 1.0f / minibatch_index;
m_avg[filter] = alpha_cbn * mean[filter] + (1 - alpha_cbn) * m_avg[filter];
mean[filter] = m_avg[filter];
v_avg[filter] = alpha_cbn * v_tmp + (1 - alpha_cbn) * v_avg[filter];
float variance_tmp = fmax(0.0f, v_avg[filter] - powf(m_avg[filter], 2));
if (inverse_variance) variance[filter] = 1.0f / sqrtf(variance_tmp + epsilon);
else variance[filter] = variance_tmp;
//if (max_minibatch_index == minibatch_index)
{
if(rolling_mean_gpu) rolling_mean_gpu[filter] = alpha * mean[filter] + (1 - alpha) * rolling_mean_gpu[filter];
if(rolling_variance_gpu) rolling_variance_gpu[filter] = alpha * variance_tmp + (1 - alpha) * rolling_variance_gpu[filter];
}
}
}
extern "C" void fast_v_cbn_gpu(const float *x, float *mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float *m_avg, float *v_avg, float *variance,
const float alpha, float *rolling_mean_gpu, float *rolling_variance_gpu, int inverse_variance, float epsilon)
{
fast_v_cbn_kernel << <filters, BLOCK, 0, get_cuda_stream() >> >(x, mean, batch, filters, spatial, minibatch_index, max_minibatch_index, m_avg, v_avg, variance, alpha, rolling_mean_gpu, rolling_variance_gpu, inverse_variance, epsilon);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void inverse_variance_kernel(int size, float *src, float *dst, float epsilon)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size)
dst[index] = 1.0f / sqrtf(src[index] + epsilon);
}
extern "C" void inverse_variance_ongpu(int size, float *src, float *dst, float epsilon)
{
const int num_blocks = size / BLOCK + 1;
inverse_variance_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(size, src, dst, epsilon);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void normalize_scale_bias_kernel(int N, float *x, float *mean, float *variance, float *scales, float *biases, int batch, int filters, int spatial, int inverse_variance, float epsilon)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
float val = 0;
if(inverse_variance) val = (x[index] - mean[f]) * variance[f];
else val = (x[index] - mean[f]) / (sqrtf(variance[f] + epsilon));
val *= scales[f];
val += biases[f];
if (!isnan(val) && !isinf(val))
x[index] = val;
}
extern "C" void normalize_scale_bias_gpu(float *x, float *mean, float *variance, float *scales, float *biases, int batch, int filters, int spatial, int inverse_variance, float epsilon)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
normalize_scale_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(current_size, x, mean, variance, scales, biases, batch, filters, spatial, inverse_variance, epsilon);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream() , x, batch, filters, spatial, mean);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream() , x, mean, batch, filters, spatial, variance);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, ALPHA, X, INCX, Y, INCY);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void simple_copy_ongpu(int size, float *src, float *dst)
{
const int num_blocks = size / BLOCK + 1;
simple_copy_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(size, src, dst);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void memcpy_ongpu(void *dst, void *src, int size_bytes)
{
CHECK_CUDA(hipMemcpyAsync(dst, src, size_bytes, hipMemcpyDefault, get_cuda_stream()));
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void mul_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, X, INCX, Y, INCY);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, X, OFFX, INCX, Y, OFFY, INCY);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, x, spatial, layers, batch, forward, out);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, x, w, h, c, batch, stride, forward, out);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void mask_gpu_new_api(int N, float * X, float mask_num, float * mask, float val)
{
hipLaunchKernelGGL(( mask_kernel_new_api) , dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, X, mask_num, mask, val);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void mask_ongpu(int N, float * X, float mask_num, float * mask)
{
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, X, mask_num, mask);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void const_ongpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, ALPHA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void constrain_ongpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, ALPHA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void constrain_min_max_ongpu(int N, float MIN, float MAX, float * X, int INCX)
{
constrain_min_max_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, MIN, MAX, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void scal_add_ongpu(int N, float ALPHA, float BETA, float * X, int INCX)
{
scal_add_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, ALPHA, BETA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void supp_ongpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream() , N, ALPHA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
//fill_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, INCX);
//CHECK_CUDA(hipPeekAtLastError());
fill_kernel << <get_number_of_blocks(N, BLOCK), BLOCK, 0, get_cuda_stream() >> >(N, ALPHA, X, INCX);
CHECK_CUDA(hipPeekAtLastError());
}
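// Gradient centralization: subtract, for every filter, the mean over its c*h*w elements
// (typically the filter's weight updates). One warp handles one filter, using
// warpAllReduceSum for the mean; the host wrapper only launches the kernel when the
// filter size is a multiple of WARP_SIZE.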
__global__ void gradient_centralization_kernel(int filters, int f_size, float *in)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = index % WARP_SIZE;
const int f = index / WARP_SIZE;
if (f >= filters) return;
float mean = 0;
for (int i = 0; i < f_size; i += WARP_SIZE) {
mean += warpAllReduceSum(in[f*f_size + i + tid]);
}
mean = mean / f_size;
for (int i = 0; i < f_size; i += WARP_SIZE) {
in[f*f_size + i + tid] -= mean;
}
}
extern "C" void gradient_centralization_gpu(int w, int h, int c, int f, float *in)
{
const int size = f * WARP_SIZE;
const int f_size = c * h * w;
if (f_size % WARP_SIZE == 0) {
gradient_centralization_kernel << <get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >> > (f, f_size, in);
CHECK_CUDA(hipPeekAtLastError());
}
}
__device__ float relu(float src) {
if (src > 0) return src;
return 0;
}
__device__ float lrelu(float src) {
const float eps = 0.001;
if (src > eps) return src;
return eps;
}
__device__ float grad_relu(float src) {
return (src > 0);
}
__device__ float grad_lrelu(float src) {
const float eps = 0.001;
return (src > eps);
}
__global__ void shortcut_singlelayer_simple_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float out_val = in[id];
int add_outputs = outputs_of_layers_gpu[0];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
float *add = layers_output_gpu[0];
out_val += add[add_index];
}
out[id] = out_val;
}
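// Weighted multi-input shortcut: each output element is a weighted sum of the incoming
// activation plus up to n earlier layers. With RELU_NORMALIZATION each per-source weight
// w becomes lrelu(w)/sum_i lrelu(w_i); with SOFTMAX_NORMALIZATION it becomes
// exp(w - max)/sum_i exp(w_i - max), so the (n+1) coefficients for a given channel or
// position sum to roughly 1 before the sources are added.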
__global__ void shortcut_multilayer_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float sum = 1, max_val = -FLT_MAX;
if (weights_gpu && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += lrelu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
}
float out_val = 0;
if (weights_gpu) {
float w = weights_gpu[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out_val = in[id] * w; // [0 or c or (c, h ,w)]
}
else out_val = in[id];
// layers
for (int i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers_gpu[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
float *add = layers_output_gpu[i];
if (weights_gpu) {
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out_val += add[add_index] * w; // [0 or c or (c, h ,w)]
}
else out_val += add[add_index];
}
}
out[id] = out_val;
}
extern "C" void shortcut_multilayer_gpu(int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
//printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n);
int size = batch * src_outputs;
if (nweights == 0 && n == 1) {
shortcut_singlelayer_simple_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu, layers_output_gpu, out, in, weights_gpu, nweights, weights_normalization);
}
else {
shortcut_multilayer_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu, layers_output_gpu, out, in, weights_gpu, nweights, weights_normalization);
}
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_shortcut_multilayer_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu,
float **layers_delta_gpu, float *delta_out, float *delta_in, float *weights_gpu, float *weight_updates_gpu, int nweights, float *in, float **layers_output_gpu, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float grad = 1, sum = 1, max_val = -FLT_MAX;
int i;
if (weights_gpu && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += lrelu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
}
if (weights_gpu) {
float w = weights_gpu[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
if (weights_normalization == RELU_NORMALIZATION) grad = w;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad = w*(1-w);
delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
float weights_update_tmp = delta_in[id] * in[id] * grad;// / step;
if (layer_step == 1 && (size/32) > (id/32 + 1)) {
if (isnan(weights_update_tmp) || isinf(weights_update_tmp)) {
weights_update_tmp = 0;
}
float wu = warpAllReduceSum(weights_update_tmp);
if (threadIdx.x % 32 == 0) {
if (!isnan(wu) && !isinf(wu))
atomicAdd(&weight_updates_gpu[src_i / step], wu);
}
}
else {
if (!isnan(weights_update_tmp) && !isinf(weights_update_tmp))
atomicAdd(&weight_updates_gpu[src_i / step], weights_update_tmp);
//weight_updates_gpu[src_i / step] += weights_update_tmp;
}
}
else delta_out[id] += delta_in[id];
// layers
for (int i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers_gpu[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *layer_delta = layers_delta_gpu[i];
if (weights_gpu) {
float *add = layers_output_gpu[i];
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
if (weights_normalization == RELU_NORMALIZATION) grad = w;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad = w*(1 - w);
layer_delta[add_index] += delta_in[id] * w;
float weights_update_tmp = delta_in[id] * add[add_index] * grad;// / step;
if (layer_step == 1 && (size / 32) > (id / 32 + 1)) {
if (isnan(weights_update_tmp) || isinf(weights_update_tmp)) {
weights_update_tmp = 0;
}
float wu = warpAllReduceSum(weights_update_tmp);
if (threadIdx.x % 32 == 0) {
if (!isnan(wu) && !isinf(wu))
atomicAdd(&weight_updates_gpu[weights_index], wu);
//if(weights_gpu[weights_index] != 1) printf(" wu = %f, weights_update_tmp = %f, w = %f, weights_gpu[weights_index] = %f, grad = %f, weights_normalization = %d ",
// wu, weights_update_tmp, w, weights_gpu[weights_index], grad, weights_normalization);
}
}
else {
if (!isnan(weights_update_tmp) && !isinf(weights_update_tmp))
atomicAdd(&weight_updates_gpu[weights_index], weights_update_tmp);
//weight_updates_gpu[weights_index] += weights_update_tmp;
}
}
else layer_delta[add_index] += delta_in[id];
}
}
}
extern "C" void backward_shortcut_multilayer_gpu(int src_outputs, int batch, int n, int *outputs_of_layers_gpu,
float **layers_delta_gpu, float *delta_out, float *delta_in, float *weights_gpu, float *weight_updates_gpu, int nweights, float *in, float **layers_output_gpu, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
//printf(" nweights = %d, n = %d, layer_step = %d, step = %d \n", nweights, n, layer_step, step);
//printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n);
int size = batch * src_outputs;
backward_shortcut_multilayer_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu,
layers_delta_gpu, delta_out, delta_in, weights_gpu, weight_updates_gpu, nweights, in, layers_output_gpu, weights_normalization);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void simple_input_shortcut_kernel(float *in, int size, float *add, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
out[id] = in[id] + add[id];
}
__global__ void input_shortcut_kernel(float *in, int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = in[out_index] + add[add_index];
}
extern "C" void input_shortcut_gpu(float *in, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
if (w1 == w2 && h1 == h2 && c1 == c2) {
int size = batch * w1 * h1 * c1;
simple_input_shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(in, size, add, out);
CHECK_CUDA(hipPeekAtLastError());
return;
}
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
//input_shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(in, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
simple_copy_ongpu(w2 * h2 * c2 * batch, in, out);
shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = abs(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff < 0) ? -1 : 1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream() , n, pred, truth, delta, error);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t - p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
softmax_x_ent_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, pred, truth, delta, error);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream() , n, pred, truth, delta, error);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream() , num, a, b, s, c);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream() , num, a, b, s, da, db, ds, dc);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream() , num, a, b, c);
CHECK_CUDA(hipPeekAtLastError());
}
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
int val = input[i];
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = exp(input[i]/temp - largest/temp);
sum += e;
output[i] = e;
}
for(i = 0; i < n; ++i){
output[i] /= sum;
}
}
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(b >= batch) return;
softmax_device(n, input + b*offset, temp, output + b*offset);
}
extern "C" void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
int inputs = n;
int batch = groups;
hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch)), dim3(BLOCK), 0, get_cuda_stream(), inputs, offset, batch, input, temp, output);
CHECK_CUDA(hipPeekAtLastError());
}
__device__ void softmax_device_new_api(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for (i = 0; i < n; ++i) {
int val = input[i*stride];
largest = (val>largest) ? val : largest;
}
for (i = 0; i < n; ++i) {
float e = expf(input[i*stride] / temp - largest / temp);
sum += e;
output[i*stride] = e;
}
for (i = 0; i < n; ++i) {
output[i*stride] /= sum;
}
}
__global__ void softmax_kernel_new_api(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device_new_api(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu_new_api(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
softmax_kernel_new_api << <cuda_gridsize(batch*groups), BLOCK, 0, get_cuda_stream() >> >(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int out_index = i;
int out_w = i % (w*stride);
i = i / (w*stride);
int out_h = i % (h*stride);
i = i / (h*stride);
int out_c = i%c;
i = i / c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if (forward) out[out_index] += scale * x[in_index];
else atomicAdd(x + in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(size, in, w, h, c, batch, stride, forward, scale, out);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g] * spatial;
int boff = b*stride;
softmax_device_new_api(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree_gpu(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array_new_api(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array_new_api(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
hipLaunchKernelGGL(( softmax_tree_kernel) , dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream() , input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
CHECK_CUDA(hipPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
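// Sanitisation helpers: fix_nan_and_inf replaces non-finite values with a small
// index-derived number, reset_nan_and_inf zeroes them, and is_nan_or_inf only reports
// whether any such value exists.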
__global__ void fix_nan_and_inf_kernel(float *input, size_t size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val)) {
input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value
}
}
}
extern "C" void fix_nan_and_inf(float *input, size_t size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
fix_nan_and_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size);
CHECK_CUDA(hipPeekAtLastError());
//CHECK_CUDA(hipDeviceSynchronize());
}
__global__ void reset_nan_and_inf_kernel(float *input, size_t size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val)) {
input[index] = 0;
}
}
}
extern "C" void reset_nan_and_inf(float *input, size_t size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
reset_nan_and_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size);
CHECK_CUDA(hipPeekAtLastError());
//CHECK_CUDA(hipDeviceSynchronize());
}
__global__ void is_nan_or_inf_kernel(float *input, size_t size, int *pinned_return)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val))
*pinned_return = 1;
}
}
extern "C" int is_nan_or_inf(float *input, size_t size)
{
int *pinned_return;
CHECK_CUDA(hipHostMalloc(&pinned_return, sizeof(int), hipHostMallocMapped)); // mapped (zero-copy) pinned allocation
*pinned_return = 0;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
is_nan_or_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size, pinned_return);
CHECK_CUDA(hipDeviceSynchronize());
int ret_val = *pinned_return;
CHECK_CUDA(hipHostFree(pinned_return));
return ret_val;
}
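// Fused element-wise sum of up to three optional arrays followed by an activation
// (LOGISTIC, TANH, LEAKY or LINEAR).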
__global__ void add_3_arrays_activate_kernel(float *a1, float *a2, float *a3, size_t size, ACTIVATION a, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = 0;
if (a1) val += a1[index];
if (a2) val += a2[index];
if (a3) val += a3[index];
if (a == LOGISTIC) val = 1.f / (1.f + expf(-val));
else if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1);
else if (a == LEAKY) val = (val < 0) ? val*0.1 : val;
dst[index] = val;
}
}
extern "C" void add_3_arrays_activate(float *a1, float *a2, float *a3, size_t size, ACTIVATION a, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
if (!(a == LOGISTIC || a == TANH || a == LEAKY || a == LINEAR)) {
printf(" add_3_arrays_activate() doesn't support activation %d, it supports only LOGISTIC and TANH \n", a);
exit(EXIT_FAILURE);
}
add_3_arrays_activate_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, a3, size, a, dst);
}
__global__ void sum_of_mults_kernel(float *a1, float *a2, float *b1, float *b2, size_t size, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
dst[index] = a1[index] * a2[index] + b1[index] * b2[index];
}
}
extern "C" void sum_of_mults(float *a1, float *a2, float *b1, float *b2, size_t size, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
sum_of_mults_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, b1, b2, size, dst);
}
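// Applies the activation to a1 and multiplies the result element-wise by a2,
// i.e. a fused gating-style operation (TANH, LEAKY or LINEAR only).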
__global__ void activate_and_mult_kernel(float *a1, float *a2, size_t size, ACTIVATION a, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = a1[index];
if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1);
else if (a == LEAKY) val = (val < 0) ? val*0.1 : val;
dst[index] = val * a2[index];
}
}
extern "C" void activate_and_mult(float *a1, float *a2, size_t size, ACTIVATION a, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
if (!(a == TANH || a == LEAKY || a == LINEAR)) {
printf(" activat_and_mult() doesn't support activation %d, it supports only TANH \n", a);
exit(EXIT_FAILURE);
}
activate_and_mult_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, size, a, dst);
}
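// Broadcast multiply for the scale_channels layer: each element is scaled by a
// per-channel factor (scale_wh == 0) or by a per-spatial-position factor (scale_wh != 0).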
__global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
if (scale_wh) {
int osd_index = index % channel_size + (index / batch_size)*channel_size;
out[index] = in_w_h_c[index] * scales_c[osd_index];
}
else {
out[index] = in_w_h_c[index] * scales_c[index / channel_size];
}
}
}
extern "C" void scale_channels_gpu(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
scale_channels_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(in_w_h_c, size, channel_size, batch_size, scale_wh, scales_c, out);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_scale_channels_kernel(float *in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
if (scale_wh)
{
int osd_index = index % channel_size + (index / batch_size)*channel_size;
//out_state_delta[osd_index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index] / channel_size); // l.delta * from
out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required here
}
else {
int osd_index = index / channel_size;
//out_state_delta[osd_index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
int warp_id = index / 32;
int index_warp_start = warp_id * 32;
int osd_index_warp_start = index_warp_start / channel_size;
int osd_index_warp_end = (index_warp_start + 31) / channel_size;
if (osd_index_warp_start == osd_index_warp_end) // all thread in warp process the same channel
{
float sum = warpAllReduceSum(in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from
if (threadIdx.x % 32 == 0) {
atomicAdd(&out_state_delta[osd_index], sum);
//out_state_delta[osd_index] += sum;
}
}
else {
atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from
}
out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required here
}
}
}
extern "C" void backward_scale_channels_gpu(float *in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
backward_scale_channels_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (in_w_h_c_delta, size, channel_size, batch_size, scale_wh,
in_scales_c, out_from_delta,
in_from_output, out_state_delta);
CHECK_CUDA(hipPeekAtLastError());
}
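// SAM-style spatial attention: plain element-wise product of the input with an
// attention map of the same size (channel_size is unused in the kernel itself).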
__global__ void sam_kernel(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out[index] = in_w_h_c[index] * scales_c[index];
}
}
extern "C" void sam_gpu(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
sam_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(in_w_h_c, size, channel_size, scales_c, out);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_sam_kernel(float *in_w_h_c_delta, int size, int channel_size,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out_state_delta[index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
out_from_delta[index] += in_scales_c[index] * in_w_h_c_delta[index]; // input * l.delta
//out_state_delta[index] += in_w_h_c_delta[index];
//out_from_delta[index] = in_w_h_c_delta[index];
}
}
extern "C" void backward_sam_gpu(float *in_w_h_c_delta, int size, int channel_size,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
backward_sam_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (in_w_h_c_delta, size, channel_size,
in_scales_c, out_from_delta,
in_from_output, out_state_delta);
CHECK_CUDA(hipPeekAtLastError());
}
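// Rotates every size x size convolution kernel by `angle` degrees with bilinear
// interpolation, then rescales each kernel to compensate for samples that fell
// outside its border.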
__global__ void smooth_rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// rotate left or right
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
}
extern "C" void smooth_rotate_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
smooth_rotate_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void stretch_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float scale, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id > 0)
{
if (stage_id == 1) scale = 0.65;
else if (stage_id == 2) scale = 0.8;
else if (stage_id == 3) scale = 1.3;
if (reverse) scale = 1 / scale;
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x_c + (x_d - x_c) / scale
// Ysource = y_c + (y_d - y_c) / scale
float x_s = x_c + (x - x_c) / scale;
float y_s = y_c + (y - y_c) / scale;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
//const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
//if (scale < 1) weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
}
}
}
}
}
extern "C" void stretch_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, float scale, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
stretch_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, scale, reverse);
CHECK_CUDA(hipPeekAtLastError());
}
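// Four-stage weight deformation: stage 0 copies the kernels unchanged, stages 1-2
// rotate them by +angle / -angle, and stage 3 mirrors them horizontally, so each
// quarter of the filters gets a different deformation of the shared weights.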
__global__ void sway_and_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id == 1 || stage_id == 2)
{
// rotate left or right
if (stage_id == 2) angle = -angle;
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
else if (stage_id == 3)
{
// flip
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
}
}
extern "C" void sway_and_flip_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
sway_and_flip_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(hipPeekAtLastError());
}
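// Same four-way split, but with exact 0/90/180/270-degree rotations; `reverse`
// swaps source and destination indices so the deformation can be undone.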
__global__ void rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// if(reverse)
if (stage_id == 0) {
// simple copy
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = x + y*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 1)
{
// 90 degree clockwise rotation - 1
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - y) + x*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 2)
{
// 180 degree clockwise rotation - 2
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - x) + (kernel_size - 1 - y)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 3)
{
// 270 degree clockwise rotation - 3
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = y + (kernel_size - 1 - x)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
}
}
extern "C" void rotate_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
rotate_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, reverse);
CHECK_CUDA(hipPeekAtLastError());
}
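// Eight-stage variant: copy, four stretch factors (0.65, 0.8, 1.2, 1.4), two
// rotations (+angle / -angle) and a horizontal flip, each applied to one eighth of
// the filters.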
__global__ void stretch_sway_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 8; // 8 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id == 1 || stage_id == 2 || stage_id == 3 || stage_id == 4)
{
float scale = 0.5;
if (stage_id == 1) scale = 0.65;
else if (stage_id == 2) scale = 0.8;
else if (stage_id == 3) scale = 1.2;
else if (stage_id == 4) scale = 1.4;
if (reverse) scale = 1 / scale;
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x_c + (x_d - x_c) / scale
// Ysource = y_c + (y_d - y_c) / scale
float x_s = x_c + (x - x_c) / scale;
float y_s = y_c + (y - y_c) / scale;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
//const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
if(scale > 1)
weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
}
}
}
else if (stage_id == 5 || stage_id == 6)
{
// rotate left or right
if (stage_id == 6) angle = -angle;
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
else if (stage_id == 7)
{
// flip
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
}
}
extern "C" void stretch_sway_flip_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
stretch_sway_flip_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(hipPeekAtLastError());
}
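// Averages the corresponding elements of `groups` equal-sized chunks and writes the
// mean back into every chunk (a reduce followed by a broadcast).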
__global__ void reduce_and_expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < current_size) {
float val = 0;
for (int i = 0; i < groups; ++i) {
val += src_gpu[index + i*current_size];
}
for (int i = 0; i < groups; ++i) {
dst_gpu[index + i*current_size] = val / groups;
}
}
}
extern "C" void reduce_and_expand_array_gpu(const float *src_gpu, float *dst_gpu, int size, int groups)
{
const int current_size = size / groups;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(current_size, block_size);
reduce_and_expand_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, current_size, groups);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < current_size) {
for (int i = 0; i < groups; ++i) {
dst_gpu[index + i*current_size] = src_gpu[index];
}
}
}
extern "C" void expand_array_gpu(const float *src_gpu, float *dst_gpu, int size, int groups)
{
const int current_size = size / groups;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(current_size, block_size);
expand_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, current_size, groups);
CHECK_CUDA(hipPeekAtLastError());
}
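// Sign-preserving remapping of each value: (|x|*10 + abs_add)^eps / divider,
// optionally clipped, with NaN/Inf results replaced by zero.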
__global__ void mult_inverse_array_kernel(const float *src_gpu, float *dst_gpu, int size, const float eps,
float divider, const float clip, const float abs_add)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = src_gpu[index];
float sign = (val < 0) ? -1 : 1;
// eps = 1 by default
// eps = 2 - lower delta
// eps = 0 - higher delta (linear)
// eps = -1 - high delta (inverse number)
// = (abs(x)*10+1)^(-1)
float unsigned_val = powf(fabs(val)*10 + abs_add, eps);
unsigned_val = unsigned_val / divider;
if (unsigned_val > clip && clip != 0.0) unsigned_val = clip;
if (isnan(unsigned_val) || isinf(unsigned_val)) unsigned_val = 0;
dst_gpu[index] = unsigned_val * sign;
}
}
extern "C" void mult_inverse_array_gpu(const float *src_gpu, float *dst_gpu, int size, float eps, float divider, float clip, float abs_add)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
mult_inverse_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, size, eps, divider, clip, abs_add);
CHECK_CUDA(hipPeekAtLastError());
}
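// Contrastive probability for detection features: P = exp(sim/T) divided by the sum
// of the precomputed exp_sim of all other pairs that share the same j; the result is
// clamped to stay below 1.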
__global__ void P_constrastive_f_det_kernel(int *labels, unsigned int feature_size, float temperature, contrastive_params *contrast_p, const int contrast_p_size)
{
const int il = blockIdx.x*blockDim.x + threadIdx.x;
if (il < contrast_p_size) {
const float sim = contrast_p[il].sim;
const size_t i = contrast_p[il].i;
const size_t j = contrast_p[il].j;
const float numerator = expf(sim / temperature);
float denominator = 0;
int k;
for (k = 0; k < contrast_p_size; ++k) {
contrastive_params cp = contrast_p[k];
//if (k != i && labels[k] != labels[i]) {
//if (k != i) {
if (cp.i != i && cp.j == j) {
//const float sim_den = cp.sim;
////const float sim_den = find_sim(k, l, contrast_p, contrast_p_size); // cosine_similarity(z[k], z[l], feature_size);
//denominator += expf(sim_den / temperature);
denominator += cp.exp_sim;
}
}
float result = 0.9999;
if (denominator != 0) result = numerator / denominator;
if (result > 1) result = 0.9999;
contrast_p[il].P = result;
}
}
extern "C" void P_constrastive_f_det_gpu(int *labels, unsigned int feature_size, float temperature, contrastive_params *contrast_p, const int contrast_p_size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(contrast_p_size, block_size);
P_constrastive_f_det_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (labels, feature_size, temperature, contrast_p, contrast_p_size);
CHECK_CUDA(hipPeekAtLastError());
}
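// CoordConv channels: channels 0 and 1 receive the normalised x and y coordinates in
// [-1, 1) and channel 2 their radius; type == 1 zeroes those three channels instead.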
__global__ void coord_conv_kernel(float *dst, int w, int h, int chan, int batch, int type)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
const int x = i % w;
i = i / w;
const int y = i % h;
i = i / h;
const int c = i % chan;
//i = i / chan;
//const int b = i % batch;
if (type == 0) {
if (c == 0) {
const float x_val = (2.0f * x) / w - 1.0f; // [-1; 1)
dst[i] = x_val; // x - coord
}
else if (c == 1) {
const float y_val = (2.0f * y) / h - 1.0f; // [-1; 1)
dst[i] = y_val; // y - coord
}
else if (c == 2) {
const float x_val = (2.0f * x) / w - 1.0f; // [-1; 1)
const float y_val = (2.0f * y) / h - 1.0f; // [-1; 1)
const float rad_val = sqrtf(x_val*x_val + y_val*y_val); // [0; 1.414)
dst[i] = rad_val; // rad - coord
}
}
else if (type == 1) {
if (c >= 0 && c <= 2) {
dst[i] = 0;
}
}
}
extern "C" void coord_conv_gpu(float *dst, int size, int w, int h, int chan, int b, int type)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
coord_conv_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (dst, w, h, chan, b, type);
CHECK_CUDA(hipPeekAtLastError());
}
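// Implicit layer (a learned constant tensor): forward broadcasts the weights to every
// batch element, backward sums the incoming deltas over the batch into the weight
// updates.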
__global__ void forward_implicit_kernel(int size, int batch, int nweights, float *weight_gpu, float *output_gpu)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
output_gpu[id] = weight_gpu[id % nweights];
}
extern "C" void forward_implicit_gpu(int batch, int nweights, float *weight_gpu, float *output_gpu)
{
int size = batch * nweights;
forward_implicit_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, batch, nweights, weight_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void backward_implicit_kernel(int size, int batch, int nweights, float *weight_updates_gpu, float *delta_gpu)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
for (int i = 0; i < batch; ++i) {
weight_updates_gpu[id] += delta_gpu[id + i * nweights];
}
}
extern "C" void backward_implicit_gpu(int batch, int nweights, float *weight_updates_gpu, float *delta_gpu)
{
int size = nweights;
backward_implicit_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, batch, nweights, weight_updates_gpu, delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
| f979e1bd4d1c281000d5df94988be0560f4ee5d7.cu | #include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include <assert.h>
#include <float.h>
#include "blas.h"
#include "dark_cuda.h"
#include "utils.h"
#include "tree.h"
__inline__ __device__
float warpAllReduceSum(float val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
val += __shfl_xor_sync(0xffffffff, val, mask);
#else
val += __shfl_xor(val, mask);
#endif
return val;
}
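// Debug helper: prints every index where the two arrays differ by more than 10 %
// (relative to the first array).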
__global__ void compare_2_arrays_kernel(float *one, float *two, int size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size) return;
const float diff = 100 * fabs(one[index] - two[index]) / fabs(one[index]);
if (diff > 10) printf(" i: %d - one = %f, two = %f, diff = %f %% \n", index, one[index], two[index], diff);
}
void compare_2_arrays_gpu(float *one, float *two, int size)
{
const int num_blocks = get_number_of_blocks(size, BLOCK);
compare_2_arrays_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(one, two, size);
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
}
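// Exponential moving average: avg = avg*(1-alpha) + src*alpha, and src is then
// overwritten with the smoothed value.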
__global__ void mean_array_kernel(float *src, int size, float alpha, float *avg)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= size) return;
avg[i] = avg[i] * (1 - alpha) + src[i] * alpha;
src[i] = avg[i];
}
void mean_array_gpu(float *src, int size, float alpha, float *avg)
{
const int num_blocks = get_number_of_blocks(size, BLOCK);
mean_array_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(src, size, alpha, avg);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void scale_bias_kernel(float *output, float *scale, int batch, int filters, int spatial, int current_size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= current_size) return;
int f = (index / spatial) % filters;
output[index] *= scale[f];
}
void scale_bias_gpu(float *output, float *scale, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
scale_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(output, scale, batch, filters, spatial, current_size);
CHECK_CUDA(cudaPeekAtLastError());
}
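// Per-filter reduction of delta * x_norm into scale_updates: one block per filter,
// BLOCK-wide partial sums in shared memory, thread 0 finishes the accumulation.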
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
backward_scale_kernel<<<n, BLOCK, 0, get_cuda_stream() >>>(x_norm, delta, batch, n, size, scale_updates);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int filters, int spatial, int current_size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= current_size) return;
int f = (index / spatial) % filters;
output[index] += biases[f];
}
void add_bias_gpu(float *output, float *biases, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
add_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(output, biases, batch, filters, spatial, current_size);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK, 0, get_cuda_stream()>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
*/
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
backward_bias_kernel<<<n, BLOCK, 0, get_cuda_stream() >>>(bias_updates, delta, batch, n, size);
CHECK_CUDA(cudaPeekAtLastError());
}
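// Adam step with bias-corrected moments: x += rate * m_hat / (sqrt(v_hat) + eps);
// the wrapper below also applies weight decay, refreshes m and v, and clears the
// gradient buffer.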
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
adam_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, x, m, v, B1, B2, rate, eps, t);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_ongpu(n, B1, m, 1);
scal_ongpu(n, B2, v, 1);
axpy_ongpu(n, -decay*batch, w, 1, d, 1);
axpy_ongpu(n, (1 - B1), d, 1, m, 1);
mul_ongpu(n, d, 1, d, 1);
axpy_ongpu(n, (1 - B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_ongpu(n, 0, d, 1);
CHECK_CUDA(cudaPeekAtLastError());
}
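// Batch-norm normalisation: x = (x - mean[f]) / sqrt(variance[f] + 1e-5), where f is
// the channel index of each element.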
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
x[index] = (x[index] - mean[f]) / (sqrtf(variance[f] + .00001f));
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
normalize_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(current_size, x, mean, variance, batch, filters, spatial);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.F/(sqrtf(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
normalize_delta_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5 * powf(variance[i] + .000001f, (float)(-3./2.));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
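// Block-per-filter reductions for the batch-norm backward pass: mean_delta sums the
// deltas and scales by -1/sqrt(variance + 1e-6); variance_delta sums delta*(x - mean)
// and scales by -0.5*(variance + 1e-6)^(-3/2).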
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.F/sqrtf(variance[filter] + .000001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3./2.));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.F/sqrtf(variance[i] + .000001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
mean_delta_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream() >>>(delta, variance, batch, filters, spatial, mean_delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
fast_mean_delta_kernel<<<filters, BLOCK, 0, get_cuda_stream() >>>(delta, variance, batch, filters, spatial, mean_delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
fast_variance_delta_kernel<<<filters, BLOCK, 0, get_cuda_stream() >>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
CHECK_CUDA(cudaPeekAtLastError());
}
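// Reference one-thread-per-filter versions of the per-channel mean and variance over
// the batch and spatial dimensions (the fast_* kernels further down use a whole block
// per filter instead).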
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.F/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.F/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
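// Reorg layer: maps between a c x h x w layout and a (c/stride^2) x (h*stride) x (w*stride)
// layout; `forward` selects the direction of the copy.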
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void constrain_weight_updates_kernel(int N, float coef, float *weights_gpu, float *weight_updates_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) {
const float w = weights_gpu[i];
const float wu = weight_updates_gpu[i];
const float wu_sign = (wu == 0) ? 0 : (fabs(wu) / wu);
const float abs_limit = fabs(w * coef);
if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign;
}
}
extern "C" void constrain_weight_updates_ongpu(int N, float coef, float *weights_gpu, float *weight_updates_gpu)
{
constrain_weight_updates_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, coef, weights_gpu, weight_updates_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = powf(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void constrain_min_max_kernel(int N, float MIN, float MAX, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = fminf(MAX, fmaxf(MIN, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void scal_add_kernel(int N, float ALPHA, float BETA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = X[i*INCX] * ALPHA + BETA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
X[index*INCX] = ALPHA;
}
__global__ void mask_kernel_new_api(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n && mask[i] == mask_num) x[i] = val;
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = mask_num;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void simple_copy_kernel(int size, float *src, float *dst)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size)
dst[index] = src[index];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
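// Block-per-filter mean: each thread accumulates a strided partial sum in shared
// memory and thread 0 finishes the reduction and divides by spatial*batch.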
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
float mean_tmp = 0;
for(i = 0; i < threads; ++i){
mean_tmp += local[i];
}
mean_tmp /= spatial * batch;
mean[filter] = mean_tmp;
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
fast_mean_kernel << <filters, BLOCK, 0, get_cuda_stream() >> >(x, batch, filters, spatial, mean);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
float variance_tmp = 0;
for(i = 0; i < threads; ++i){
variance_tmp += local[i];
}
variance_tmp /= (spatial * batch);// -1);
variance[filter] = variance_tmp;
}
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
fast_variance_kernel<<<filters, BLOCK, 0, get_cuda_stream() >>>(x, mean, batch, filters, spatial, variance);
CHECK_CUDA(cudaPeekAtLastError());
}
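// Cross-iteration batch norm (CBN): keeps running estimates of the mean and of E[x^2]
// across minibatches, derives the variance from them (optionally storing its inverse
// square root), and updates the rolling mean/variance.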
__global__ void fast_v_cbn_kernel(const float *x, float *mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float *m_avg, float *v_avg, float *variance,
const float alpha, float *rolling_mean_gpu, float *rolling_variance_gpu, int inverse_variance, float epsilon)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for (j = 0; j < batch; ++j) {
for (i = 0; i < spatial; i += threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i + id < spatial) ? powf(x[index], 2) : 0;
}
}
__syncthreads();
if (id == 0) {
float v_tmp = 0;
v_tmp = 0;
for (i = 0; i < threads; ++i) {
v_tmp += local[i];
}
v_tmp /= (spatial * batch - 1);
v_tmp = fmax(v_tmp, powf(mean[filter], 2));
const float alpha_cbn = 1.0f / minibatch_index;
m_avg[filter] = alpha_cbn * mean[filter] + (1 - alpha_cbn) * m_avg[filter];
mean[filter] = m_avg[filter];
v_avg[filter] = alpha_cbn * v_tmp + (1 - alpha_cbn) * v_avg[filter];
float variance_tmp = fmax(0.0f, v_avg[filter] - powf(m_avg[filter], 2));
if (inverse_variance) variance[filter] = 1.0f / sqrtf(variance_tmp + epsilon);
else variance[filter] = variance_tmp;
//if (max_minibatch_index == minibatch_index)
{
if(rolling_mean_gpu) rolling_mean_gpu[filter] = alpha * mean[filter] + (1 - alpha) * rolling_mean_gpu[filter];
if(rolling_variance_gpu) rolling_variance_gpu[filter] = alpha * variance_tmp + (1 - alpha) * rolling_variance_gpu[filter];
}
}
}
extern "C" void fast_v_cbn_gpu(const float *x, float *mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float *m_avg, float *v_avg, float *variance,
const float alpha, float *rolling_mean_gpu, float *rolling_variance_gpu, int inverse_variance, float epsilon)
{
fast_v_cbn_kernel << <filters, BLOCK, 0, get_cuda_stream() >> >(x, mean, batch, filters, spatial, minibatch_index, max_minibatch_index, m_avg, v_avg, variance, alpha, rolling_mean_gpu, rolling_variance_gpu, inverse_variance, epsilon);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void inverse_variance_kernel(int size, float *src, float *dst, float epsilon)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size)
dst[index] = 1.0f / sqrtf(src[index] + epsilon);
}
extern "C" void inverse_variance_ongpu(int size, float *src, float *dst, float epsilon)
{
const int num_blocks = size / BLOCK + 1;
inverse_variance_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(size, src, dst, epsilon);
CHECK_CUDA(cudaPeekAtLastError());
}
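// Fused normalise-scale-bias: (x - mean) times the pre-inverted variance, or divided
// by sqrt(variance + eps), then per-channel scale and bias; non-finite results leave
// x unchanged.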
__global__ void normalize_scale_bias_kernel(int N, float *x, float *mean, float *variance, float *scales, float *biases, int batch, int filters, int spatial, int inverse_variance, float epsilon)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
float val = 0;
if(inverse_variance) val = (x[index] - mean[f]) * variance[f];
else val = (x[index] - mean[f]) / (sqrtf(variance[f] + epsilon));
val *= scales[f];
val += biases[f];
if (!isnan(val) && !isinf(val))
x[index] = val;
}
extern "C" void normalize_scale_bias_gpu(float *x, float *mean, float *variance, float *scales, float *biases, int batch, int filters, int spatial, int inverse_variance, float epsilon)
{
const int current_size = batch * filters * spatial;
const int num_blocks = get_number_of_blocks(current_size, BLOCK);
normalize_scale_bias_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(current_size, x, mean, variance, scales, biases, batch, filters, spatial, inverse_variance, epsilon);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
mean_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream() >>>(x, batch, filters, spatial, mean);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
variance_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream() >>>(x, mean, batch, filters, spatial, variance);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
pow_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, ALPHA, X, INCX, Y, INCY);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
axpy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void simple_copy_ongpu(int size, float *src, float *dst)
{
const int num_blocks = size / BLOCK + 1;
simple_copy_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(size, src, dst);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void memcpy_ongpu(void *dst, void *src, int size_bytes)
{
CHECK_CUDA(cudaMemcpyAsync(dst, src, size_bytes, cudaMemcpyDefault, get_cuda_stream()));
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void mul_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
mul_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, X, INCX, Y, INCY);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, x, spatial, layers, batch, forward, out);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, x, w, h, c, batch, stride, forward, out);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void mask_gpu_new_api(int N, float * X, float mask_num, float * mask, float val)
{
mask_kernel_new_api <<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, X, mask_num, mask, val);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void mask_ongpu(int N, float * X, float mask_num, float * mask)
{
mask_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, X, mask_num, mask);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void const_ongpu(int N, float ALPHA, float * X, int INCX)
{
const_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, ALPHA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void constrain_ongpu(int N, float ALPHA, float * X, int INCX)
{
constrain_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, ALPHA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void constrain_min_max_ongpu(int N, float MIN, float MAX, float * X, int INCX)
{
constrain_min_max_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, MIN, MAX, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void scal_add_ongpu(int N, float ALPHA, float BETA, float * X, int INCX)
{
scal_add_kernel << <cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >> >(N, ALPHA, BETA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void supp_ongpu(int N, float ALPHA, float * X, int INCX)
{
supp_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, ALPHA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
//fill_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, INCX);
//CHECK_CUDA(cudaPeekAtLastError());
fill_kernel << <get_number_of_blocks(N, BLOCK), BLOCK, 0, get_cuda_stream() >> >(N, ALPHA, X, INCX);
CHECK_CUDA(cudaPeekAtLastError());
}
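// Gradient centralization: one warp per filter computes the mean of that filter's gradient with a warp-wide
// reduction and subtracts it from every element (caller guarantees f_size is a multiple of WARP_SIZE).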
__global__ void gradient_centralization_kernel(int filters, int f_size, float *in)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = index % WARP_SIZE;
const int f = index / WARP_SIZE;
if (f >= filters) return;
float mean = 0;
for (int i = 0; i < f_size; i += WARP_SIZE) {
mean += warpAllReduceSum(in[f*f_size + i + tid]);
}
mean = mean / f_size;
for (int i = 0; i < f_size; i += WARP_SIZE) {
in[f*f_size + i + tid] -= mean;
}
}
extern "C" void gradient_centralization_gpu(int w, int h, int c, int f, float *in)
{
const int size = f * WARP_SIZE;
const int f_size = c * h * w;
if (f_size % WARP_SIZE == 0) {
gradient_centralization_kernel << <get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >> > (f, f_size, in);
CHECK_CUDA(cudaPeekAtLastError());
}
}
__device__ float relu(float src) {
if (src > 0) return src;
return 0;
}
__device__ float lrelu(float src) {
const float eps = 0.001;
if (src > eps) return src;
return eps;
}
__device__ float grad_relu(float src) {
return (src > 0);
}
__device__ float grad_lrelu(float src) {
const float eps = 0.001;
return (src > eps);
}
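// Shortcut (residual) connections: add the outputs of one or more earlier layers to the current input,
// optionally weighting each source with ReLU- or softmax-normalized per-layer/per-channel weights.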
__global__ void shortcut_singlelayer_simple_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float out_val = in[id];
int add_outputs = outputs_of_layers_gpu[0];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
float *add = layers_output_gpu[0];
out_val += add[add_index];
}
out[id] = out_val;
}
__global__ void shortcut_multilayer_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float sum = 1, max_val = -FLT_MAX;
if (weights_gpu && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += lrelu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
}
float out_val = 0;
if (weights_gpu) {
float w = weights_gpu[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out_val = in[id] * w; // [0 or c or (c, h ,w)]
}
else out_val = in[id];
// layers
for (int i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers_gpu[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
float *add = layers_output_gpu[i];
if (weights_gpu) {
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out_val += add[add_index] * w; // [0 or c or (c, h ,w)]
}
else out_val += add[add_index];
}
}
out[id] = out_val;
}
extern "C" void shortcut_multilayer_gpu(int src_outputs, int batch, int n, int *outputs_of_layers_gpu, float **layers_output_gpu, float *out, float *in, float *weights_gpu, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
//printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n);
int size = batch * src_outputs;
if (nweights == 0 && n == 1) {
shortcut_singlelayer_simple_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu, layers_output_gpu, out, in, weights_gpu, nweights, weights_normalization);
}
else {
shortcut_multilayer_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu, layers_output_gpu, out, in, weights_gpu, nweights, weights_normalization);
}
CHECK_CUDA(cudaPeekAtLastError());
}
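// Backward pass of the weighted shortcut: routes delta to each source layer and accumulates weight updates,
// using a warp-level reduction before atomicAdd when all lanes of a warp touch the same weight.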
__global__ void backward_shortcut_multilayer_kernel(int size, int src_outputs, int batch, int n, int *outputs_of_layers_gpu,
float **layers_delta_gpu, float *delta_out, float *delta_in, float *weights_gpu, float *weight_updates_gpu, int nweights, float *in, float **layers_output_gpu, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float grad = 1, sum = 1, max_val = -FLT_MAX;
int i;
if (weights_gpu && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (int i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += lrelu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
}
if (weights_gpu) {
float w = weights_gpu[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
if (weights_normalization == RELU_NORMALIZATION) grad = w;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad = w*(1-w);
delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
float weights_update_tmp = delta_in[id] * in[id] * grad;// / step;
if (layer_step == 1 && (size/32) > (id/32 + 1)) {
if (isnan(weights_update_tmp) || isinf(weights_update_tmp)) {
weights_update_tmp = 0;
}
float wu = warpAllReduceSum(weights_update_tmp);
if (threadIdx.x % 32 == 0) {
if (!isnan(wu) && !isinf(wu))
atomicAdd(&weight_updates_gpu[src_i / step], wu);
}
}
else {
if (!isnan(weights_update_tmp) && !isinf(weights_update_tmp))
atomicAdd(&weight_updates_gpu[src_i / step], weights_update_tmp);
//weight_updates_gpu[src_i / step] += weights_update_tmp;
}
}
else delta_out[id] += delta_in[id];
// layers
for (int i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers_gpu[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *layer_delta = layers_delta_gpu[i];
if (weights_gpu) {
float *add = layers_output_gpu[i];
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights_gpu[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
if (weights_normalization == RELU_NORMALIZATION) grad = w;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad = w*(1 - w);
layer_delta[add_index] += delta_in[id] * w;
float weights_update_tmp = delta_in[id] * add[add_index] * grad;// / step;
if (layer_step == 1 && (size / 32) > (id / 32 + 1)) {
if (isnan(weights_update_tmp) || isinf(weights_update_tmp)) {
weights_update_tmp = 0;
}
float wu = warpAllReduceSum(weights_update_tmp);
if (threadIdx.x % 32 == 0) {
if (!isnan(wu) && !isinf(wu))
atomicAdd(&weight_updates_gpu[weights_index], wu);
//if(weights_gpu[weights_index] != 1) printf(" wu = %f, weights_update_tmp = %f, w = %f, weights_gpu[weights_index] = %f, grad = %f, weights_normalization = %d ",
// wu, weights_update_tmp, w, weights_gpu[weights_index], grad, weights_normalization);
}
}
else {
if (!isnan(weights_update_tmp) && !isinf(weights_update_tmp))
atomicAdd(&weight_updates_gpu[weights_index], weights_update_tmp);
//weight_updates_gpu[weights_index] += weights_update_tmp;
}
}
else layer_delta[add_index] += delta_in[id];
}
}
}
extern "C" void backward_shortcut_multilayer_gpu(int src_outputs, int batch, int n, int *outputs_of_layers_gpu,
float **layers_delta_gpu, float *delta_out, float *delta_in, float *weights_gpu, float *weight_updates_gpu, int nweights, float *in, float **layers_output_gpu, WEIGHTS_NORMALIZATION_T weights_normalization)
{
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
//printf(" nweights = %d, n = %d, layer_step = %d, step = %d \n", nweights, n, layer_step, step);
//printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n);
int size = batch * src_outputs;
backward_shortcut_multilayer_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, src_outputs, batch, n, outputs_of_layers_gpu,
layers_delta_gpu, delta_out, delta_in, weights_gpu, weight_updates_gpu, nweights, in, layers_output_gpu, weights_normalization);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void simple_input_shortcut_kernel(float *in, int size, float *add, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
out[id] = in[id] + add[id];
}
__global__ void input_shortcut_kernel(float *in, int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = in[out_index] + add[add_index];
}
extern "C" void input_shortcut_gpu(float *in, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
if (w1 == w2 && h1 == h2 && c1 == c2) {
int size = batch * w1 * h1 * c1;
simple_input_shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(in, size, add, out);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
//input_shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(in, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
simple_copy_ongpu(w2 * h2 * c2 * batch, in, out);
shortcut_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = abs(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff < 0) ? -1 : 1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
smooth_l1_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >>>(n, pred, truth, delta, error);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t - p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
softmax_x_ent_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, pred, truth, delta, error);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
l2_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >>>(n, pred, truth, delta, error);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
weighted_sum_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream() >>>(num, a, b, s, c);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
weighted_delta_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream() >>>(num, a, b, s, da, db, ds, dc);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
mult_add_into_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream() >>>(num, a, b, c);
CHECK_CUDA(cudaPeekAtLastError());
}
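// Numerically stable softmax with temperature over n contiguous values: the maximum is subtracted before exponentiation.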
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
float val = input[i];
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = exp(input[i]/temp - largest/temp);
sum += e;
output[i] = e;
}
for(i = 0; i < n; ++i){
output[i] /= sum;
}
}
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(b >= batch) return;
softmax_device(n, input + b*offset, temp, output + b*offset);
}
extern "C" void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
int inputs = n;
int batch = groups;
softmax_kernel<<<cuda_gridsize(batch), BLOCK, 0, get_cuda_stream()>>>(inputs, offset, batch, input, temp, output);
CHECK_CUDA(cudaPeekAtLastError());
}
__device__ void softmax_device_new_api(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for (i = 0; i < n; ++i) {
float val = input[i*stride];
largest = (val>largest) ? val : largest;
}
for (i = 0; i < n; ++i) {
float e = expf(input[i*stride] / temp - largest / temp);
sum += e;
output[i*stride] = e;
}
for (i = 0; i < n; ++i) {
output[i*stride] /= sum;
}
}
__global__ void softmax_kernel_new_api(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device_new_api(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu_new_api(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
softmax_kernel_new_api << <cuda_gridsize(batch*groups), BLOCK, 0, get_cuda_stream() >> >(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int out_index = i;
int out_w = i % (w*stride);
i = i / (w*stride);
int out_h = i % (h*stride);
i = i / (h*stride);
int out_c = i%c;
i = i / c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if (forward) out[out_index] += scale * x[in_index];
else atomicAdd(x + in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> >(size, in, w, h, c, batch, stride, forward, scale, out);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g] * spatial;
int boff = b*stride;
softmax_device_new_api(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree_gpu(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array_new_api(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array_new_api(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
softmax_tree_kernel <<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream() >>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
CHECK_CUDA(cudaPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
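// Replaces NaN/Inf entries in place with a small index-dependent value so training can continue.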
__global__ void fix_nan_and_inf_kernel(float *input, size_t size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val)) {
input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value
}
}
}
extern "C" void fix_nan_and_inf(float *input, size_t size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
fix_nan_and_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size);
CHECK_CUDA(cudaPeekAtLastError());
//CHECK_CUDA(cudaDeviceSynchronize());
}
__global__ void reset_nan_and_inf_kernel(float *input, size_t size)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val)) {
input[index] = 0;
}
}
}
extern "C" void reset_nan_and_inf(float *input, size_t size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
reset_nan_and_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size);
CHECK_CUDA(cudaPeekAtLastError());
//CHECK_CUDA(cudaDeviceSynchronize());
}
__global__ void is_nan_or_inf_kernel(float *input, size_t size, int *pinned_return)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = input[index];
if (isnan(val) || isinf(val))
*pinned_return = 1;
}
}
extern "C" int is_nan_or_inf(float *input, size_t size)
{
int *pinned_return;
CHECK_CUDA(cudaHostAlloc(&pinned_return, sizeof(int), cudaHostRegisterMapped));
*pinned_return = 0;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
is_nan_or_inf_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(input, size, pinned_return);
CHECK_CUDA(cudaDeviceSynchronize());
int ret_val = *pinned_return;
CHECK_CUDA(cudaFreeHost(pinned_return));
return ret_val;
}
__global__ void add_3_arrays_activate_kernel(float *a1, float *a2, float *a3, size_t size, ACTIVATION a, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = 0;
if (a1) val += a1[index];
if (a2) val += a2[index];
if (a3) val += a3[index];
if (a == LOGISTIC) val = 1.f / (1.f + expf(-val));
else if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1);
else if (a == LEAKY) val = (val < 0) ? val*0.1 : val;
dst[index] = val;
}
}
extern "C" void add_3_arrays_activate(float *a1, float *a2, float *a3, size_t size, ACTIVATION a, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
if (!(a == LOGISTIC || a == TANH || a == LEAKY || a == LINEAR)) {
printf(" add_3_arrays_activate() doesn't support activation %d, it supports only LOGISTIC and TANH \n", a);
exit(EXIT_FAILURE);
}
add_3_arrays_activate_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, a3, size, a, dst);
}
__global__ void sum_of_mults_kernel(float *a1, float *a2, float *b1, float *b2, size_t size, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
dst[index] = a1[index] * a2[index] + b1[index] * b2[index];
}
}
extern "C" void sum_of_mults(float *a1, float *a2, float *b1, float *b2, size_t size, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
sum_of_mults_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, b1, b2, size, dst);
}
__global__ void activate_and_mult_kernel(float *a1, float *a2, size_t size, ACTIVATION a, float *dst)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = a1[index];
if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1);
else if (a == LEAKY) val = (val < 0) ? val*0.1 : val;
dst[index] = val * a2[index];
}
}
extern "C" void activate_and_mult(float *a1, float *a2, size_t size, ACTIVATION a, float *dst)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
if (!(a == TANH || a == LEAKY || a == LINEAR)) {
printf(" activat_and_mult() doesn't support activation %d, it supports only TANH \n", a);
exit(EXIT_FAILURE);
}
activate_and_mult_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(a1, a2, size, a, dst);
}
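// Multiplies the input feature map by per-channel scales (scale_wh == 0) or by a per-spatial-position map
// (scale_wh != 0), as used by scale_channels / squeeze-and-excitation style layers.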
__global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
if (scale_wh) {
int osd_index = index % channel_size + (index / batch_size)*channel_size;
out[index] = in_w_h_c[index] * scales_c[osd_index];
}
else {
out[index] = in_w_h_c[index] * scales_c[index / channel_size];
}
}
}
extern "C" void scale_channels_gpu(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
scale_channels_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(in_w_h_c, size, channel_size, batch_size, scale_wh, scales_c, out);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void backward_scale_channels_kernel(float *in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
if (scale_wh)
{
int osd_index = index % channel_size + (index / batch_size)*channel_size;
//out_state_delta[osd_index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index] / channel_size); // l.delta * from
out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required here
}
else {
int osd_index = index / channel_size;
//out_state_delta[osd_index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
int warp_id = index / 32;
int index_warp_start = warp_id * 32;
int osd_index_warp_start = index_warp_start / channel_size;
int osd_index_warp_end = (index_warp_start + 31) / channel_size;
if (osd_index_warp_start == osd_index_warp_end) // all thread in warp process the same channel
{
float sum = warpAllReduceSum(in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from
if (threadIdx.x % 32 == 0) {
atomicAdd(&out_state_delta[osd_index], sum);
//out_state_delta[osd_index] += sum;
}
}
else {
atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from
}
out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required here
}
}
}
extern "C" void backward_scale_channels_gpu(float *in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
backward_scale_channels_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (in_w_h_c_delta, size, channel_size, batch_size, scale_wh,
in_scales_c, out_from_delta,
in_from_output, out_state_delta);
CHECK_CUDA(cudaPeekAtLastError());
}
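// SAM (spatial attention module): element-wise product of the input with a same-sized attention map.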
__global__ void sam_kernel(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out[index] = in_w_h_c[index] * scales_c[index];
}
}
extern "C" void sam_gpu(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
sam_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> >(in_w_h_c, size, channel_size, scales_c, out);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void backward_sam_kernel(float *in_w_h_c_delta, int size, int channel_size,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out_state_delta[index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
out_from_delta[index] += in_scales_c[index] * in_w_h_c_delta[index]; // input * l.delta
//out_state_delta[index] += in_w_h_c_delta[index];
//out_from_delta[index] = in_w_h_c_delta[index];
}
}
extern "C" void backward_sam_gpu(float *in_w_h_c_delta, int size, int channel_size,
float *in_scales_c, float *out_from_delta,
float *in_from_output, float *out_state_delta)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
backward_sam_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (in_w_h_c_delta, size, channel_size,
in_scales_c, out_from_delta,
in_from_output, out_state_delta);
CHECK_CUDA(cudaPeekAtLastError());
}
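// Rotates every size x size convolution kernel by angle degrees using bilinear interpolation,
// then rescales to compensate for samples that fall outside the kernel window.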
__global__ void smooth_rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// rotate left or right
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
}
extern "C" void smooth_rotate_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
smooth_rotate_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void stretch_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float scale, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id > 0)
{
if (stage_id == 1) scale = 0.65;
else if (stage_id == 2) scale = 0.8;
else if (stage_id == 3) scale = 1.3;
if (reverse) scale = 1 / scale;
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x_c + (x_d - x_c) / scale
// Ysource = y_c + (y_d - y_c) / scale
float x_s = x_c + (x - x_c) / scale;
float y_s = y_c + (y - y_c) / scale;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
//const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
//if (scale < 1) weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
}
}
}
}
}
extern "C" void stretch_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, float scale, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
stretch_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, scale, reverse);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void sway_and_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id == 1 || stage_id == 2)
{
// rotate left or right
if (stage_id == 2) angle = -angle;
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
else if (stage_id == 3)
{
// flip
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
}
}
extern "C" void sway_and_flip_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
sway_and_flip_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// if(reverse)
if (stage_id == 0) {
// simple copy
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = x + y*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 1)
{
// 90 degree clockwise rotation - 1
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - y) + x*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 2)
{
// 180 degree clockwise rotation - 2
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - x) + (kernel_size - 1 - y)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 3)
{
// 270 degree clockwise rotation - 3
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = y + (kernel_size - 1 - x)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
}
}
extern "C" void rotate_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
rotate_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, reverse);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void stretch_sway_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 8; // 8 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id == 1 || stage_id == 2 || stage_id == 3 || stage_id == 4)
{
float scale = 0.5;
if (stage_id == 1) scale = 0.65;
else if (stage_id == 2) scale = 0.8;
else if (stage_id == 3) scale = 1.2;
else if (stage_id == 4) scale = 1.4;
if (reverse) scale = 1 / scale;
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x_c + (x_d - x_c) / scale
// Ysource = y_c + (y_d - y_c) / scale
float x_s = x_c + (x - x_c) / scale;
float y_s = y_c + (y - y_c) / scale;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
//const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
if(scale > 1)
weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
}
}
}
else if (stage_id == 5 || stage_id == 6)
{
// rotate left or right
if (stage_id == 6) angle = -angle;
if (reverse) angle = -angle;
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floorf(x_s); // round down
int x_1 = ceilf(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floorf(y_s);
int y_1 = ceilf(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
else if (stage_id == 7)
{
// flip
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
}
}
extern "C" void stretch_sway_flip_weights_gpu(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int size, int angle, int reverse)
{
const int kernel_area = size*size;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(nweights / kernel_area, block_size);
stretch_sway_flip_weights_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_weight_gpu, weight_deform_gpu, nweights, n, size, angle, reverse);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void reduce_and_expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < current_size) {
float val = 0;
for (int i = 0; i < groups; ++i) {
val += src_gpu[index + i*current_size];
}
for (int i = 0; i < groups; ++i) {
dst_gpu[index + i*current_size] = val / groups;
}
}
}
extern "C" void reduce_and_expand_array_gpu(const float *src_gpu, float *dst_gpu, int size, int groups)
{
const int current_size = size / groups;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(current_size, block_size);
reduce_and_expand_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, current_size, groups);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < current_size) {
for (int i = 0; i < groups; ++i) {
dst_gpu[index + i*current_size] = src_gpu[index];
}
}
}
extern "C" void expand_array_gpu(const float *src_gpu, float *dst_gpu, int size, int groups)
{
const int current_size = size / groups;
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(current_size, block_size);
expand_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, current_size, groups);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void mult_inverse_array_kernel(const float *src_gpu, float *dst_gpu, int size, const float eps,
float divider, const float clip, const float abs_add)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = src_gpu[index];
float sign = (val < 0) ? -1 : 1;
// eps = 1 by default
// eps = 2 - lower delta
// eps = 0 - higher delta (linear)
// eps = -1 - high delta (inverse number)
// = (abs(x)*10+1)^(-1)
float unsigned_val = powf(fabs(val)*10 + abs_add, eps);
unsigned_val = unsigned_val / divider;
if (unsigned_val > clip && clip != 0.0) unsigned_val = clip;
if (isnan(unsigned_val) || isinf(unsigned_val)) unsigned_val = 0;
dst_gpu[index] = unsigned_val * sign;
}
}
extern "C" void mult_inverse_array_gpu(const float *src_gpu, float *dst_gpu, int size, float eps, float divider, float clip, float abs_add)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
mult_inverse_array_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (src_gpu, dst_gpu, size, eps, divider, clip, abs_add);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void P_constrastive_f_det_kernel(int *labels, unsigned int feature_size, float temperature, contrastive_params *contrast_p, const int contrast_p_size)
{
const int il = blockIdx.x*blockDim.x + threadIdx.x;
if (il < contrast_p_size) {
const float sim = contrast_p[il].sim;
const size_t i = contrast_p[il].i;
const size_t j = contrast_p[il].j;
const float numerator = expf(sim / temperature);
float denominator = 0;
int k;
for (k = 0; k < contrast_p_size; ++k) {
contrastive_params cp = contrast_p[k];
//if (k != i && labels[k] != labels[i]) {
//if (k != i) {
if (cp.i != i && cp.j == j) {
//const float sim_den = cp.sim;
////const float sim_den = find_sim(k, l, contrast_p, contrast_p_size); // cosine_similarity(z[k], z[l], feature_size);
//denominator += expf(sim_den / temperature);
denominator += cp.exp_sim;
}
}
float result = 0.9999;
if (denominator != 0) result = numerator / denominator;
if (result > 1) result = 0.9999;
contrast_p[il].P = result;
}
}
extern "C" void P_constrastive_f_det_gpu(int *labels, unsigned int feature_size, float temperature, contrastive_params *contrast_p, const int contrast_p_size)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(contrast_p_size, block_size);
P_constrastive_f_det_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (labels, feature_size, temperature, contrast_p, contrast_p_size);
CHECK_CUDA(cudaPeekAtLastError());
}
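// CoordConv: fills the first three channels with normalized x, y and radius coordinate maps (type == 0);
// type == 1 zeroes those channels for the backward pass.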
__global__ void coord_conv_kernel(float *dst, int w, int h, int chan, int batch, int type)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
const int idx = i; // flat index into dst
if (idx >= w*h*chan*batch) return;
const int x = i % w;
i = i / w;
const int y = i % h;
i = i / h;
const int c = i % chan;
//i = i / chan;
//const int b = i % batch;
if (type == 0) {
if (c == 0) {
const float x_val = (2.0f * x) / w - 1.0f; // [-1; 1)
dst[idx] = x_val; // x - coord
}
else if (c == 1) {
const float y_val = (2.0f * y) / h - 1.0f; // [-1; 1)
dst[idx] = y_val; // y - coord
}
else if (c == 2) {
const float x_val = (2.0f * x) / w - 1.0f; // [-1; 1)
const float y_val = (2.0f * y) / h - 1.0f; // [-1; 1)
const float rad_val = sqrtf(x_val*x_val + y_val*y_val); // [0; 1.414)
dst[idx] = rad_val; // rad - coord
}
}
else if (type == 1) {
if (c >= 0 && c <= 2) {
dst[idx] = 0;
}
}
}
extern "C" void coord_conv_gpu(float *dst, int size, int w, int h, int chan, int b, int type)
{
const int block_size = BLOCK;
const int num_blocks = get_number_of_blocks(size, block_size);
coord_conv_kernel << <num_blocks, block_size, 0, get_cuda_stream() >> > (dst, w, h, chan, b, type);
CHECK_CUDA(cudaPeekAtLastError());
}
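// Implicit layer: the forward pass broadcasts the learned implicit weights to every image in the batch;
// the backward pass below sums the incoming deltas over the batch into the weight updates.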
__global__ void forward_implicit_kernel(int size, int batch, int nweights, float *weight_gpu, float *output_gpu)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
output_gpu[id] = weight_gpu[id % nweights];
}
extern "C" void forward_implicit_gpu(int batch, int nweights, float *weight_gpu, float *output_gpu)
{
int size = batch * nweights;
forward_implicit_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, batch, nweights, weight_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void backward_implicit_kernel(int size, int batch, int nweights, float *weight_updates_gpu, float *delta_gpu)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
for (int i = 0; i < batch; ++i) {
weight_updates_gpu[id] += delta_gpu[id + i * nweights];
}
}
extern "C" void backward_implicit_gpu(int batch, int nweights, float *weight_updates_gpu, float *delta_gpu)
{
int size = nweights;
backward_implicit_kernel << <cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >> > (size, batch, nweights, weight_updates_gpu, delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
|
19d36bc5610168a98db1df3709ed7542856bdc6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#if THRUST_PATH
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#else
#include <bolt/amp/functional.h>
#include <bolt/amp/reduce.h>
#include <bolt/amp/transform.h>
#endif
struct l1cost_functor
{
__host__ __device__
l1cost_functor() = default;
__device__
float operator()(float x, float y) const
{
#ifdef __HIP_PLATFORM_HCC__
return fabsf(x) + fabsf(y);
#else
return std::abs(x) + std::abs(y);
#endif
}
l1cost_functor(const l1cost_functor& fun) = default;
__host__ __device__
~l1cost_functor() {}
};
void THNN_CudaL1Cost_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THCUNN_assertSameGPU(state, 1, input);
float sum;
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
sum = thrust::reduce(input_data, input_data+size, (float) 0, l1cost_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto input_data_end = input_data + size;
sum = bolt::amp::reduce(input_data,
//input_data+size,
input_data_end,
0.0f,
l1cost_functor());
#endif
THCudaTensor_free(state, input);
THCudaTensor_set1d(state, output, 0, sum);
}
struct l1cost_updateGradInput_functor
{
__host__ __device__ float operator()(float x) const
{
if (x > 0)
return 1;
else if (x < 0)
return -1;
else
return 0;
}
};
void THNN_CudaL1Cost_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THCUNN_assertSameGPU(state, 2, input, gradInput);
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, gradInput, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, gradInput_data, l1cost_updateGradInput_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto gradInput_data = THCudaTensor_data(state, gradInput);
bolt::amp::transform(input_data,
input_data+size,
gradInput_data,
l1cost_updateGradInput_functor());
#endif
THCudaTensor_free(state, input);
}
| 19d36bc5610168a98db1df3709ed7542856bdc6e.cu | #include "THCUNN.h"
#include "common.h"
#if THRUST_PATH
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#else
#include <bolt/amp/functional.h>
#include <bolt/amp/reduce.h>
#include <bolt/amp/transform.h>
#endif
struct l1cost_functor
{
__host__ __device__
l1cost_functor() = default;
__device__
float operator()(float x, float y) const
{
#ifdef __HIP_PLATFORM_HCC__
return fabsf(x) + fabsf(y);
#else
return std::abs(x) + std::abs(y);
#endif
}
l1cost_functor(const l1cost_functor& fun) = default;
__host__ __device__
~l1cost_functor() {}
};
void THNN_CudaL1Cost_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THCUNN_assertSameGPU(state, 1, input);
float sum;
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
sum = thrust::reduce(input_data, input_data+size, (float) 0, l1cost_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto input_data_end = input_data + size;
sum = bolt::amp::reduce(input_data,
//input_data+size,
input_data_end,
0.0f,
l1cost_functor());
#endif
THCudaTensor_free(state, input);
THCudaTensor_set1d(state, output, 0, sum);
}
struct l1cost_updateGradInput_functor
{
__host__ __device__ float operator()(float x) const
{
if (x > 0)
return 1;
else if (x < 0)
return -1;
else
return 0;
}
};
void THNN_CudaL1Cost_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THCUNN_assertSameGPU(state, 2, input, gradInput);
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, gradInput, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, gradInput_data, l1cost_updateGradInput_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto gradInput_data = THCudaTensor_data(state, gradInput);
bolt::amp::transform(input_data,
input_data+size,
gradInput_data,
l1cost_updateGradInput_functor());
#endif
THCudaTensor_free(state, input);
}
|
3c25c11059073ee24ac1f3fafa718bb4585950bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hipfft.h"
namespace FFT {
namespace CuFFT {
FFT::Common::PerformanceTimer& timer();
void computeCuFFT(float2* h_signal, int size);
}
}
| 3c25c11059073ee24ac1f3fafa718bb4585950bb.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "cufft.h"
namespace FFT {
namespace CuFFT {
FFT::Common::PerformanceTimer& timer();
void computeCuFFT(float2* h_signal, int size);
}
}
|
fbb946da4a60b55f96147e59a4d3e55675aea64a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common/book.h"
__global__
void add( int a, int b, int *c)
{
*c = a + b;
}
int main()
{
int c;
int *dev_c;
HANDLE_ERROR(hipMalloc((void**) &dev_c, sizeof(int)));
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c);
HANDLE_ERROR(hipMemcpy(&c,
dev_c,
sizeof(int),
hipMemcpyDeviceToHost));
printf("2 + 7 = %d\n", c);
hipFree(dev_c);
return 0;
}
| fbb946da4a60b55f96147e59a4d3e55675aea64a.cu | #include <stdio.h>
#include "../common/book.h"
__global__
void add( int a, int b, int *c)
{
*c = a + b;
}
int main()
{
int c;
int *dev_c;
HANDLE_ERROR(cudaMalloc((void**) &dev_c, sizeof(int)));
add<<<1,1>>>(2, 7, dev_c);
HANDLE_ERROR(cudaMemcpy(&c,
dev_c,
sizeof(int),
cudaMemcpyDeviceToHost));
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
return 0;
}
|
fa7bbfc9a534523f8fdfcc060600f575a1156565.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
* Lempel, Ziv, Storer, and Szymanski Encoding and Decoding on CUDA
*
*
****************************************************************************
* CUDA LZSS
* Authors : Adnan Ozsoy, Martin Swany,Indiana University - Bloomington
* Date : April 11, 2011
****************************************************************************
Copyright 2011 Adnan Ozsoy, Martin Swany, Indiana University - Bloomington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
/***************************************************************************
* Code is adopted from below source
*
* LZSS: An ANSI C LZss Encoding/Decoding Routine
* Copyright (C) 2003 by Michael Dipperstein ([email protected])
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
***************************************************************************/
/***************************************************************************
* INCLUDED FILES
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "getopt.h"
#include <time.h>
#include "gpu_compress.h"
#include <pthread.h>
#include <unistd.h>
//#include "cuPrintf.hip"
#include <sys/time.h>
/***************************************************************************
* CUDA FILES
***************************************************************************/
#include <assert.h>
#include <hip/hip_runtime.h>
//#include "cuPrintf.hip"
/***************************************************************************
* GLOBAL VARIABLES
***************************************************************************/
//unsigned char * decompressed_buffer;
//unsigned char * init_in_d;
//unsigned char * init_out_d;
texture<unsigned char, 1, hipReadModeElementType> in_d_tex;
hipStream_t * streams;
int instreams = 16;
int nstreams = 4*instreams;
/***************************************************************************
* PROTOTYPES
***************************************************************************/
/****************************************************************************
* Function : FindMatch
* Description: This function will search through the slidingWindow
* dictionary for the longest sequence matching the MAX_CODED
* long string stored in uncodedLookahead.
* Parameters : windowHead - head of sliding window
* uncodedHead - head of uncoded lookahead buffer
* Effects : NONE
* Returned : The sliding window index where the match starts and the
* length of the match. If there is no match a length of
* zero will be returned.
****************************************************************************/
__device__ encoded_string_t FindMatch(int windowHead, int uncodedHead, unsigned char* slidingWindow, unsigned char* uncodedLookahead, \
int tx, int bx, int wfilepoint, int lastcheck, int loadcounter)
{
encoded_string_t matchData;
int i, j;
int maxcheck;
int matchingState=0;
int loop=0;
matchData.length = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
matchData.offset = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
i = windowHead ; // start at the beginning of the sliding window //
j = 0; //counter for matchings
//if(lastcheck)
maxcheck = MAX_CODED - tx*lastcheck;
//else
// maxcheck = MAX_CODED;
int tempi=0;
while (loop<WINDOW_SIZE)
{
if (slidingWindow[i] == uncodedLookahead[(uncodedHead+j)% (WINDOW_SIZE+MAX_CODED)])
{
j++;
matchingState=1;
}
else
{
if(matchingState && j > matchData.length)
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
j=0;
matchingState=0;
}
i = (i + 1) % (WINDOW_SIZE+MAX_CODED);
loop++;
if (loop >= maxcheck-1)
{
/// we wrapped around ///
loop = WINDOW_SIZE; //break;
}
}
if(j > matchData.length && matchingState )
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
return matchData;
}
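// Added illustration (not from the original source): FindMatch reports the
// (offset, length) of the longest window substring matching the lookahead,
// which EncodeKernel below turns into either a back-reference or a literal.
// For instance, if the window contains "ABAB" starting at index 100 and the
// lookahead begins "ABABX", the scan would return matchData.length = 4 with
// matchData.offset at that first 'A'; any match of length <= MAX_UNCODED is
// instead written out as a single uncoded byte.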
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__global__ void EncodeKernel(unsigned char * in_d, unsigned char * out_d, int SIZEBLOCK)
{
/* cyclic buffer sliding window of already read characters */
__shared__ unsigned char slidingWindow[WINDOW_SIZE+(MAX_CODED)];
__shared__ unsigned char uncodedLookahead[MAX_CODED*2];
__shared__ unsigned char encodedData[MAX_CODED*2];
encoded_string_t matchData;
int windowHead, uncodedHead; // head of sliding window and lookahead //
int filepoint; //file index pointer for reading
int wfilepoint; //file index pointer for writing
int lastcheck; //flag for last run of the packet
int loadcounter=0;
int bx = blockIdx.x;
int tx = threadIdx.x;
//***********************************************************************
// * Fill the sliding window buffer with some known values. DecodeLZSS must
// * use the same values. If common characters are used, there's an
// * increased chance of matching to the earlier strings.
// *********************************************************************** //
slidingWindow[tx] = ' ';
windowHead = tx;
uncodedHead = tx;
filepoint=0;
wfilepoint=0;
lastcheck=0;
__syncthreads();
//***********************************************************************
//* Copy MAX_CODED bytes from the input file into the uncoded lookahead
//* buffer.
//*********************************************************************** //
//uncodedLookahead[tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx); //in_d[bx * PCKTSIZE + tx];
uncodedLookahead[tx] = in_d[bx * PCKTSIZE + tx];
filepoint+=MAX_CODED;
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx);//uncodedLookahead[uncodedHead];
__syncthreads();
//uncodedLookahead[MAX_CODED+tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx); //in_d[bx * PCKTSIZE + filepoint + tx];
uncodedLookahead[MAX_CODED+tx] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
__syncthreads();
loadcounter++;
// Look for matching string in sliding window //
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead, tx, bx, 0, 0,loadcounter);
__syncthreads();
// now encode the rest of the file until an EOF is read //
while ((filepoint) <= PCKTSIZE && !lastcheck)
{
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
__syncthreads();
//if(lastcheck==1)
//{
// break;
//}
//if(!lastcheck)
{
if(filepoint<PCKTSIZE){
//uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx);
uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
//find the location for the thread specific view of window
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//__syncthreads();
}
else{
lastcheck++;
slidingWindow[(windowHead + MAX_CODED ) % (WINDOW_SIZE+MAX_CODED)] = '^';
}
__syncthreads();
loadcounter++;
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead,tx,bx, wfilepoint, lastcheck,loadcounter);
}
} //while
if(lastcheck==1)
{
if(matchData.length > (MAX_CODED - tx))
matchData.length = MAX_CODED - tx;
}
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
}
unsigned char * initGPUmem(int buf_length)
{
unsigned char * mem_d;
hipMalloc((void **) &mem_d, sizeof(char)*buf_length);
checkCUDAError("function, initGPUmemIN, mem alloc to gpu");
return mem_d;
}
unsigned char * initCPUmem(int buf_length)
{
unsigned char * mem_d;
hipHostMalloc((void **) &mem_d, sizeof(char)*buf_length);
checkCUDAError("function, initCPUmemIN, mem alloc to cpu");
return mem_d;
}
void deleteGPUmem(unsigned char * mem_d)
{
hipFree(mem_d);
}
void deleteCPUmem(unsigned char * mem_d)
{
hipHostFree(mem_d);
checkCUDAError("deleteCPUmem func,hipHostFree");
}
void deleteGPUStreams()
{
for (int i = 0; i < nstreams; ++i)
{
hipStreamDestroy(streams[i]);
checkCUDAError("deleteCPUmem func, hipStreamDestroy" + i);
}
}
void initGPU()
{
//hipDeviceReset();
hipSetDevice(0);
//hipSetDeviceFlags(hipDeviceScheduleAuto);
checkCUDAError("initialize GPU");
streams = (hipStream_t*) malloc(nstreams * sizeof(hipStream_t));
for(int i = 0; i < nstreams; i++) {
hipStreamCreate(&(streams[i]));
checkCUDAError("streams created");
}
}
void resetGPU()
{
hipDeviceReset();
}
int streams_in_GPU(){
return true;
}
int onestream_finish_GPU(int index)
{
//hipStreamSynchronize(streams[(index+1)*instreams -1]);
int check = (index+1)*instreams-1;
if (check == instreams * nstreams)
check = check -1;
while(hipStreamQuery(streams[check])!=hipSuccess);
checkCUDAError("cuda stream sync");
return true;
}
int compression_kernel_wrapper(unsigned char *buffer, int buf_length, unsigned char * bufferout, int compression_type,int wsize,\
int numthre, int noop,int index,unsigned char * in_d,unsigned char * out_d)
{
int numThreads = numthre;
int numblocks = (buf_length / (PCKTSIZE*instreams)) + (((buf_length % (PCKTSIZE*instreams))>0)?1:0);
int i=0;
hipFuncSetCacheConfig(EncodeKernel, hipFuncCachePreferL1);//hipFuncCachePreferShared);
for(i = 0; i < instreams; i++)
{
//copy memory to cuda device
hipMemcpyAsync(in_d+ i * (buf_length / instreams), buffer+ i * (buf_length / instreams), \
sizeof(char)*(buf_length / instreams),hipMemcpyHostToDevice, streams[index*instreams + i]);
checkCUDAError("mem copy to gpu");
}
for(i = 0; i < instreams; i++)
{
hipLaunchKernelGGL(( EncodeKernel), dim3(numblocks), dim3(numThreads), 0, streams[index*instreams + i], in_d + i * (buf_length / instreams),\
out_d + 2 * i * (buf_length / instreams),numThreads);
checkCUDAError("kernel invocation"); // Check for any CUDA errors
}
//copy memory back
for(i = 0; i < instreams; i++)
{
hipMemcpyAsync(bufferout + 2 * i * (buf_length / instreams), out_d + 2 * i * (buf_length / instreams),\
sizeof(char)*(buf_length / instreams)*2, hipMemcpyDeviceToHost, streams[index*instreams + i]);
checkCUDAError("mem copy back");
}
return 1;
}
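// Added usage sketch (an assumption for illustration only; buffer names,
// numthre and index are hypothetical placeholders, not taken from the original
// project). The output buffers are sized at twice the input to match the
// kernel's two-bytes-per-input-byte layout:
//
// initGPU();
// unsigned char *in_h = initCPUmem(buf_length); // pinned input
// unsigned char *enc_h = initCPUmem(2 * buf_length); // raw (code, byte/offset) pairs
// unsigned char *in_d = initGPUmem(buf_length);
// unsigned char *out_d = initGPUmem(2 * buf_length);
// compression_kernel_wrapper(in_h, buf_length, enc_h, 0, 0, numthre, 0, index, in_d, out_d);
// onestream_finish_GPU(index);
// int comp_length = 0;
// aftercompression_wrapper(packed_h, buf_length, enc_h, &comp_length); // packs flags + header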
void *aftercomp (void *q)
{
aftercompdata_t * data=(aftercompdata_t *)q;
int i=0, j=0, k=0, m=0, temptot=0, tempj=0;
int finish=0;
unsigned char flags =0;
unsigned char flagPos = 0x01;
unsigned char holdbuf[16];
int holdbufcount=0;
int morecounter=0;
//reset the flags again
flagPos = 0x01;
flags =0;
temptot=0;
holdbufcount=0;
unsigned char * bufferout = data->bufferout;
unsigned char * buffer = data->buffer;
int * header = data->header;
int buf_length = data->buf_length;
i = (data->tid)*((buf_length*2)/(data->numts));
j = (data->tid)*((buf_length)/(data->numts));
k = (data->tid)*(buf_length/(PCKTSIZE*data->numts));
finish = (data->tid + 1)*((buf_length)/(data->numts));
while(i<(finish*2))
{
if (j>finish) {
printf("compression took more, size is %d!!! \n",j);
data->comptookmore = 1;
break;
}
temptot = bufferout[i];
if(temptot == 1) //if no matching
{
flags |= flagPos; // mark with uncoded byte flag //
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+2;
}
else //if there is matching
{
holdbuf[holdbufcount]=temptot;
holdbufcount++;
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+(temptot*2);
}
if (flagPos == 0x80) //if we have looked at 8 characters that fills the flag holder
{
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
// reset encoded data buffer //
flags = 0;
flagPos = 0x01;
holdbufcount=0;
}
else
{
// we don't have 8 code flags yet, use next bit for next flag //
flagPos <<= 1;
}
// for each packet with the size of 4096 bytes
if(i%8192 == 0 && i>0){ //PCKTSIZE*2
if(holdbufcount>0){
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
holdbufcount=0;
}
flags = 0;
flagPos = 0x01;
if((j-tempj) >= PCKTSIZE){
morecounter++;
//compression took more, so just write the file without compression info
}
header[k]=j-tempj;
tempj=j;
k++;
}
}
data->newlen = j - (data->tid)*((buf_length)/(data->numts)) ;
return 0;
}
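// Added explanation (not in the original source): the loop above re-packs the
// kernel's fixed-size (code, byte/offset) pairs into the classic LZSS bit-flag
// stream -- one flag byte per 8 codes, with a bit set when the corresponding
// code was an uncoded literal. Eight literals in a row therefore produce the
// flag byte 0xFF followed by the 8 literal bytes, while eight matches produce
// 0x00 followed by eight (length, offset) pairs: 17 output bytes covering up
// to eight maximal-length matches of input.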
int aftercompression_wrapper(unsigned char * buffer, int buf_length, unsigned char * bufferout, int * comp_length)
{
int comptookmore = 0;
//struct timeval t1_start,t1_end;
//double alltime;
//gettimeofday(&t1_start,0);
// allocate memory to contain the header of the file:
int * header;
header = (int *)malloc (sizeof(int)*(buf_length/PCKTSIZE));
if (header == NULL) {printf ("Memory error, header"); exit (2);}
pthread_t afcomp[NWORKERS];
aftercompdata_t data[NWORKERS];
int l=0;
for(l=0;l<NWORKERS;l++)
{
data[l].tid=l;
data[l].header=header; /* offset to start of longest match */
data[l].buffer=buffer;
data[l].buf_length=buf_length;
data[l].bufferout=bufferout;
data[l].numts = NWORKERS;
data[l].comptookmore=0;
data[l].newlen=0;
pthread_create (&afcomp[l], NULL, &aftercomp, &data[l]);
}
int i=0, j=0, k=0;//, m=0, temptot=0, tempj=0;
void *status;
for(l=0;l<NWORKERS;l++){
pthread_join( afcomp[l], &status);
comptookmore += data[l].comptookmore;
if(l!=0)
{
for(i=0;i<data[l].newlen;i++)
{
buffer[j+i]=buffer[(l*(buf_length/NWORKERS))+i];
}
}
j+=data[l].newlen;
}
k=(buf_length/PCKTSIZE);
if(!comptookmore){
//Add header to buffer
unsigned char cc;
for(i=0;i<k;i++)
{
cc = (unsigned char)(header[i]>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)header[i];
buffer[j]=cc;
j++;
}
//Add total size
cc = (unsigned char)(buf_length>>24);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>16);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)buf_length;
buffer[j]=cc;
j++;
//Add pad size
int paddingsize = 0;
cc = (unsigned char)(paddingsize>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)paddingsize;
buffer[j]=cc;
j++;
}
if(comptookmore!=0)
return 0;
if(j>buf_length)
printf("compression TOOK more!!! %d\n",j);
*comp_length = j;
free(header);
return 1;
} | fa7bbfc9a534523f8fdfcc060600f575a1156565.cu | /***************************************************************************
* Lempel, Ziv, Storer, and Szymanski Encoding and Decoding on CUDA
*
*
****************************************************************************
* CUDA LZSS
* Authors : Adnan Ozsoy, Martin Swany,Indiana University - Bloomington
* Date : April 11, 2011
****************************************************************************
Copyright 2011 Adnan Ozsoy, Martin Swany, Indiana University - Bloomington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
/***************************************************************************
* Code is adopted from below source
*
* LZSS: An ANSI C LZss Encoding/Decoding Routine
* Copyright (C) 2003 by Michael Dipperstein ([email protected])
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
***************************************************************************/
/***************************************************************************
* INCLUDED FILES
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "getopt.h"
#include <time.h>
#include "gpu_compress.h"
#include <pthread.h>
#include <unistd.h>
//#include "cuPrintf.cu"
#include <sys/time.h>
/***************************************************************************
* CUDA FILES
***************************************************************************/
#include <assert.h>
#include <cuda.h>
//#include "cuPrintf.cu"
/***************************************************************************
* GLOBAL VARIABLES
***************************************************************************/
//unsigned char * decompressed_buffer;
//unsigned char * init_in_d;
//unsigned char * init_out_d;
texture<unsigned char, 1, cudaReadModeElementType> in_d_tex;
cudaStream_t * streams;
int instreams = 16;
int nstreams = 4*instreams;
/***************************************************************************
* PROTOTYPES
***************************************************************************/
/****************************************************************************
* Function : FindMatch
* Description: This function will search through the slidingWindow
* dictionary for the longest sequence matching the MAX_CODED
* long string stored in uncodedLookahead.
* Parameters : windowHead - head of sliding window
* uncodedHead - head of uncoded lookahead buffer
* Effects : NONE
* Returned : The sliding window index where the match starts and the
* length of the match. If there is no match a length of
* zero will be returned.
****************************************************************************/
__device__ encoded_string_t FindMatch(int windowHead, int uncodedHead, unsigned char* slidingWindow, unsigned char* uncodedLookahead, \
int tx, int bx, int wfilepoint, int lastcheck, int loadcounter)
{
encoded_string_t matchData;
int i, j;
int maxcheck;
int matchingState=0;
int loop=0;
matchData.length = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
matchData.offset = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
i = windowHead ; // start at the beginning of the sliding window //
j = 0; //counter for matchings
//if(lastcheck)
maxcheck = MAX_CODED - tx*lastcheck;
//else
// maxcheck = MAX_CODED;
int tempi=0;
while (loop<WINDOW_SIZE)
{
if (slidingWindow[i] == uncodedLookahead[(uncodedHead+j)% (WINDOW_SIZE+MAX_CODED)])
{
j++;
matchingState=1;
}
else
{
if(matchingState && j > matchData.length)
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
j=0;
matchingState=0;
}
i = (i + 1) % (WINDOW_SIZE+MAX_CODED);
loop++;
if (loop >= maxcheck-1)
{
/// we wrapped around ///
loop = WINDOW_SIZE; //break;
}
}
if(j > matchData.length && matchingState )
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
return matchData;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__global__ void EncodeKernel(unsigned char * in_d, unsigned char * out_d, int SIZEBLOCK)
{
/* cyclic buffer sliding window of already read characters */
__shared__ unsigned char slidingWindow[WINDOW_SIZE+(MAX_CODED)];
__shared__ unsigned char uncodedLookahead[MAX_CODED*2];
__shared__ unsigned char encodedData[MAX_CODED*2];
encoded_string_t matchData;
int windowHead, uncodedHead; // head of sliding window and lookahead //
int filepoint; //file index pointer for reading
int wfilepoint; //file index pointer for writing
int lastcheck; //flag for last run of the packet
int loadcounter=0;
int bx = blockIdx.x;
int tx = threadIdx.x;
//***********************************************************************
// * Fill the sliding window buffer with some known values. DecodeLZSS must
// * use the same values. If common characters are used, there's an
// * increased chance of matching to the earlier strings.
// *********************************************************************** //
slidingWindow[tx] = ' ';
windowHead = tx;
uncodedHead = tx;
filepoint=0;
wfilepoint=0;
lastcheck=0;
__syncthreads();
//***********************************************************************
//* Copy MAX_CODED bytes from the input file into the uncoded lookahead
//* buffer.
//*********************************************************************** //
//uncodedLookahead[tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx); //in_d[bx * PCKTSIZE + tx];
uncodedLookahead[tx] = in_d[bx * PCKTSIZE + tx];
filepoint+=MAX_CODED;
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx);//uncodedLookahead[uncodedHead];
__syncthreads();
//uncodedLookahead[MAX_CODED+tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx); //in_d[bx * PCKTSIZE + filepoint + tx];
uncodedLookahead[MAX_CODED+tx] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
__syncthreads();
loadcounter++;
// Look for matching string in sliding window //
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead, tx, bx, 0, 0,loadcounter);
__syncthreads();
// now encode the rest of the file until an EOF is read //
while ((filepoint) <= PCKTSIZE && !lastcheck)
{
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
__syncthreads();
//if(lastcheck==1)
//{
// break;
//}
//if(!lastcheck)
{
if(filepoint<PCKTSIZE){
//uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx);
uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
//find the location for the thread specific view of window
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//__syncthreads();
}
else{
lastcheck++;
slidingWindow[(windowHead + MAX_CODED ) % (WINDOW_SIZE+MAX_CODED)] = '^';
}
__syncthreads();
loadcounter++;
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead,tx,bx, wfilepoint, lastcheck,loadcounter);
}
} //while
if(lastcheck==1)
{
if(matchData.length > (MAX_CODED - tx))
matchData.length = MAX_CODED - tx;
}
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
}
unsigned char * initGPUmem(int buf_length)
{
unsigned char * mem_d;
cudaMalloc((void **) &mem_d, sizeof(char)*buf_length);
checkCUDAError("function, initGPUmemIN, mem alloc to gpu");
return mem_d;
}
unsigned char * initCPUmem(int buf_length)
{
unsigned char * mem_d;
cudaMallocHost((void **) &mem_d, sizeof(char)*buf_length);
checkCUDAError("function, initCPUmemIN, mem alloc to cpu");
return mem_d;
}
void deleteGPUmem(unsigned char * mem_d)
{
cudaFree(mem_d);
}
void deleteCPUmem(unsigned char * mem_d)
{
cudaFreeHost(mem_d);
checkCUDAError("deleteCPUmem func,cudaFreeHost");
}
void deleteGPUStreams()
{
for (int i = 0; i < nstreams; ++i)
{
cudaStreamDestroy(streams[i]);
checkCUDAError("deleteCPUmem func, cudaStreamDestroy" + i);
}
}
void initGPU()
{
//cudaDeviceReset();
cudaSetDevice(0);
//cudaSetDeviceFlags(cudaDeviceScheduleAuto);
checkCUDAError("initialize GPU");
streams = (cudaStream_t*) malloc(nstreams * sizeof(cudaStream_t));
for(int i = 0; i < nstreams; i++) {
cudaStreamCreate(&(streams[i]));
checkCUDAError("streams created");
}
}
void resetGPU()
{
cudaDeviceReset();
}
int streams_in_GPU(){
return true;
}
int onestream_finish_GPU(int index)
{
//cudaStreamSynchronize(streams[(index+1)*instreams -1]);
int check = (index+1)*instreams-1;
if (check == instreams * nstreams)
check = check -1;
while(cudaStreamQuery(streams[check])!=cudaSuccess);
checkCUDAError("cuda stream sync");
return true;
}
int compression_kernel_wrapper(unsigned char *buffer, int buf_length, unsigned char * bufferout, int compression_type,int wsize,\
int numthre, int noop,int index,unsigned char * in_d,unsigned char * out_d)
{
int numThreads = numthre;
int numblocks = (buf_length / (PCKTSIZE*instreams)) + (((buf_length % (PCKTSIZE*instreams))>0)?1:0);
int i=0;
cudaFuncSetCacheConfig(EncodeKernel, cudaFuncCachePreferL1);//cudaFuncCachePreferShared);
for(i = 0; i < instreams; i++)
{
//copy memory to cuda device
cudaMemcpyAsync(in_d+ i * (buf_length / instreams), buffer+ i * (buf_length / instreams), \
sizeof(char)*(buf_length / instreams),cudaMemcpyHostToDevice, streams[index*instreams + i]);
checkCUDAError("mem copy to gpu");
}
for(i = 0; i < instreams; i++)
{
EncodeKernel<<< numblocks, numThreads, 0, streams[index*instreams + i]>>>(in_d + i * (buf_length / instreams),\
out_d + 2 * i * (buf_length / instreams),numThreads);
checkCUDAError("kernel invocation"); // Check for any CUDA errors
}
//copy memory back
for(i = 0; i < instreams; i++)
{
cudaMemcpyAsync(bufferout + 2 * i * (buf_length / instreams), out_d + 2 * i * (buf_length / instreams),\
sizeof(char)*(buf_length / instreams)*2, cudaMemcpyDeviceToHost, streams[index*instreams + i]);
checkCUDAError("mem copy back");
}
return 1;
}
void *aftercomp (void *q)
{
aftercompdata_t * data=(aftercompdata_t *)q;
int i=0, j=0, k=0, m=0, temptot=0, tempj=0;
int finish=0;
unsigned char flags =0;
unsigned char flagPos = 0x01;
unsigned char holdbuf[16];
int holdbufcount=0;
int morecounter=0;
//reset the flags again
flagPos = 0x01;
flags =0;
temptot=0;
holdbufcount=0;
unsigned char * bufferout = data->bufferout;
unsigned char * buffer = data->buffer;
int * header = data->header;
int buf_length = data->buf_length;
i = (data->tid)*((buf_length*2)/(data->numts));
j = (data->tid)*((buf_length)/(data->numts));
k = (data->tid)*(buf_length/(PCKTSIZE*data->numts));
finish = (data->tid + 1)*((buf_length)/(data->numts));
while(i<(finish*2))
{
if (j>finish) {
printf("compression took more, size is %d!!! \n",j);
data->comptookmore = 1;
break;
}
temptot = bufferout[i];
if(temptot == 1) //if no matching
{
flags |= flagPos; // mark with uncoded byte flag //
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+2;
}
else //if there is matching
{
holdbuf[holdbufcount]=temptot;
holdbufcount++;
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+(temptot*2);
}
if (flagPos == 0x80) //if we have looked at 8 characters that fills the flag holder
{
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
// reset encoded data buffer //
flags = 0;
flagPos = 0x01;
holdbufcount=0;
}
else
{
// we don't have 8 code flags yet, use next bit for next flag //
flagPos <<= 1;
}
// for each packet with the size of 4096 bytes
if(i%8192 == 0 && i>0){ //PCKTSIZE*2
if(holdbufcount>0){
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
holdbufcount=0;
}
flags = 0;
flagPos = 0x01;
if((j-tempj) >= PCKTSIZE){
morecounter++;
//compression took more, so just write the file without compression info
}
header[k]=j-tempj;
tempj=j;
k++;
}
}
data->newlen = j - (data->tid)*((buf_length)/(data->numts)) ;
return 0;
}
int aftercompression_wrapper(unsigned char * buffer, int buf_length, unsigned char * bufferout, int * comp_length)
{
int comptookmore = 0;
//struct timeval t1_start,t1_end;
//double alltime;
//gettimeofday(&t1_start,0);
// allocate memory to contain the header of the file:
int * header;
header = (int *)malloc (sizeof(int)*(buf_length/PCKTSIZE));
if (header == NULL) {printf ("Memory error, header"); exit (2);}
pthread_t afcomp[NWORKERS];
aftercompdata_t data[NWORKERS];
int l=0;
for(l=0;l<NWORKERS;l++)
{
data[l].tid=l;
data[l].header=header; /* offset to start of longest match */
data[l].buffer=buffer;
data[l].buf_length=buf_length;
data[l].bufferout=bufferout;
data[l].numts = NWORKERS;
data[l].comptookmore=0;
data[l].newlen=0;
pthread_create (&afcomp[l], NULL, &aftercomp, &data[l]);
}
int i=0, j=0, k=0;//, m=0, temptot=0, tempj=0;
void *status;
for(l=0;l<NWORKERS;l++){
pthread_join( afcomp[l], &status);
comptookmore += data[l].comptookmore;
if(l!=0)
{
for(i=0;i<data[l].newlen;i++)
{
buffer[j+i]=buffer[(l*(buf_length/NWORKERS))+i];
}
}
j+=data[l].newlen;
}
k=(buf_length/PCKTSIZE);
if(!comptookmore){
//Add header to buffer
unsigned char cc;
for(i=0;i<k;i++)
{
cc = (unsigned char)(header[i]>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)header[i];
buffer[j]=cc;
j++;
}
//Add total size
cc = (unsigned char)(buf_length>>24);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>16);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)buf_length;
buffer[j]=cc;
j++;
//Add pad size
int paddingsize = 0;
cc = (unsigned char)(paddingsize>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)paddingsize;
buffer[j]=cc;
j++;
}
if(comptookmore!=0)
return 0;
if(j>buf_length)
printf("compression TOOK more!!! %d\n",j);
*comp_length = j;
free(header);
return 1;
} |
5adfe227724c83aab360010413bb6c29a226ec8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* \file broadcast.cu
* \brief The broadcast utils
*/
#include "blaze/math/broadcast.h"
#include "blaze/math/elementwise/broadcast_elementwise.h"
#include "blaze/math/elementwise/gpu_kernel_launcher.h"
#include "blaze/math/elementwise/elementwise_kernel.h"
namespace blaze {
template <typename DType, typename OP>
__global__ void UBroadcastUnaryDimEqualKernel(DType* y, TIndex y_size,
const DType* x, TIndex x_size) {
CUDA_KERNEL_LOOP(index, y_size) {
y[index] = OP::Map(x[index % x_size], y[index]);
}
}
template <typename DType, typename OP>
static void UBroadcastUnaryDimEqual(DType* y,
const std::vector<TIndex>& y_shape,
const DType* x,
const std::vector<TIndex>& x_shape,
CUDAContext* ctx) {
TIndex y_size = 1;
for (auto dim : y_shape) y_size *= dim;
TIndex x_size = 1;
for (auto dim : x_shape) x_size *= dim;
int thread_num = GetThreadsNum(y_size);
int block_num = GetBlockNum(CUDA_GET_BLOCKS(y_size, thread_num));
hipStream_t stream = ctx->cuda_stream();
hipLaunchKernelGGL(( UBroadcastUnaryDimEqualKernel<DType, OP>), dim3(block_num), dim3(thread_num), 0, stream,
y, y_size, x, x_size);
}
#ifndef INSTANTIATE_BROADCAST_UNARY
#define INSTANTIATE_BROADCAST_UNARY(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(T* y, \
const std::vector<TIndex>& y_shape, \
const T* x, \
const std::vector<TIndex>& x_shape, \
CUDAContext* ctx) { \
UBroadcastUnaryDimEqual<T, OP>(y, y_shape, x, x_shape, ctx); \
}
#endif
// INSTANTIATE Broadcast ASSIGN
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, float16)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, float)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, double)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, float16)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, float)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, double)
#undef INSTANTIATE_BROADCAST_UNARY
template <typename DType, typename OP>
__global__ void BatchedUBroadcastUnaryDimEqualKernel(DType* y, int batch_count, TIndex y_size,
const DType* x, TIndex x_size) {
TIndex total_y_size = batch_count * y_size;
CUDA_KERNEL_LOOP(index, total_y_size) {
int batch_index = index / y_size;
int batch_offset = index % y_size;
y[index] = OP::Map(x[batch_index * x_size + batch_offset % x_size], y[index]);
}
}
template <typename DType, typename OP>
static void BacthedUBroadcastUnaryDimEqual(DType* y,
int batch_count,
const std::vector<TIndex>& y_shape,
const DType* x,
const std::vector<TIndex>& x_shape,
CUDAContext* ctx) {
TIndex y_size = 1;
for (auto dim : y_shape) y_size *= dim;
TIndex x_size = 1;
for (auto dim : x_shape) x_size *= dim;
int thread_num = GetThreadsNum(y_size * batch_count);
int block_num = GetBlockNum(CUDA_GET_BLOCKS(y_size * batch_count, thread_num));
hipStream_t stream = ctx->cuda_stream();
hipLaunchKernelGGL(( BatchedUBroadcastUnaryDimEqualKernel<DType, OP>), dim3(block_num), dim3(thread_num), 0, stream,
y, batch_count, y_size, x, x_size);
}
#ifndef INSTANTIATE_BATCHED_BROADCAST_UNARY
#define INSTANTIATE_BATCHED_BROADCAST_UNARY(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(T* y, \
int batch_count, \
const std::vector<TIndex>& y_shape, \
const T* x, \
const std::vector<TIndex>& x_shape, \
CUDAContext* ctx) { \
BacthedUBroadcastUnaryDimEqual<T, OP>(y, batch_count, y_shape, x, x_shape, ctx); \
}
#endif
// INSTANTIATE Batched Broadcast UNARY
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, float16)
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, float)
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, double)
#undef INSTANTIATE_BATCHED_BROADCAST_UNARY
// INSTANTIATE Broadcast GEMM
INSTANTIATE_BROADCAST_GEMM(float16, CUDAContext)
INSTANTIATE_BROADCAST_GEMM(float, CUDAContext)
INSTANTIATE_BROADCAST_GEMM(double, CUDAContext)
// INSTANTIATE Batched Broadcast GEMM
INSTANTIATE_BATCHED_BROADCAST_GEMM(float16, CUDAContext)
INSTANTIATE_BATCHED_BROADCAST_GEMM(float, CUDAContext)
INSTANTIATE_BATCHED_BROADCAST_GEMM(double, CUDAContext)
template <typename DType, typename OP>
static void BatchedBroadcastElementwise(const DType* a,
const std::vector<TIndex>& a_shape,
const DType* b,
const std::vector<TIndex>& b_shape,
DType* c,
const std::vector<TIndex>& c_shape,
int batch_count,
CUDAContext* ctx) {
std::vector<TIndex> ba_shape;
std::vector<TIndex> bb_shape;
std::vector<TIndex> bc_shape;
ba_shape.push_back(batch_count);
for (size_t k = 0; k < std::max(a_shape.size(), b_shape.size()) - a_shape.size(); ++k) {
ba_shape.push_back(1UL);
}
for (auto dim : a_shape) ba_shape.push_back(dim);
bb_shape.push_back(batch_count);
for (size_t k = 0; k < std::max(a_shape.size(), b_shape.size()) - b_shape.size(); ++k) {
bb_shape.push_back(1UL);
}
for (auto dim : b_shape) bb_shape.push_back(dim);
bc_shape.push_back(batch_count);
for (auto dim : c_shape) bc_shape.push_back(dim);
bool res = broadcast::BroadcastCompute<DType, OP, GpuKernelLauncher, CUDAContext>(a, ba_shape, b, bb_shape, c, bc_shape, *ctx);
BLAZE_CONDITION_THROW(res, "broadcast compute failed");
}
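// Added worked example (shapes are hypothetical, chosen only to illustrate the
// padding above): with a_shape = {2, 8}, b_shape = {8}, c_shape = {2, 8} and
// batch_count = 4, the batched shapes become ba_shape = {4, 2, 8},
// bb_shape = {4, 1, 8} (one leading 1 inserted so a and b reach equal rank)
// and bc_shape = {4, 2, 8}; BroadcastCompute then expands b's size-1 dimension
// across the 2 rows of a within every batch.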
#ifndef INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE
#define INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(const T* a, \
TIndex lda, \
const std::vector<TIndex>& a_shape, \
const T* b, \
TIndex ldb, \
const std::vector<TIndex>& b_shape, \
T* c, \
TIndex ldc, \
const std::vector<TIndex>& c_shape, \
int batch_count, \
CUDAContext* context) { \
BatchedBroadcastElementwise<T, OP>(a, a_shape, b, b_shape, c, c_shape, batch_count, context); \
}
#endif
// INSTANTIATE Batched Broadcast Mul
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, float16)
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, float)
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, double)
#undef INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE
} // namespace blaze
| 5adfe227724c83aab360010413bb6c29a226ec8e.cu | /*
* \file broadcast.cu
* \brief The broadcast utils
*/
#include "blaze/math/broadcast.h"
#include "blaze/math/elementwise/broadcast_elementwise.h"
#include "blaze/math/elementwise/gpu_kernel_launcher.h"
#include "blaze/math/elementwise/elementwise_kernel.h"
namespace blaze {
template <typename DType, typename OP>
__global__ void UBroadcastUnaryDimEqualKernel(DType* y, TIndex y_size,
const DType* x, TIndex x_size) {
CUDA_KERNEL_LOOP(index, y_size) {
y[index] = OP::Map(x[index % x_size], y[index]);
}
}
template <typename DType, typename OP>
static void UBroadcastUnaryDimEqual(DType* y,
const std::vector<TIndex>& y_shape,
const DType* x,
const std::vector<TIndex>& x_shape,
CUDAContext* ctx) {
TIndex y_size = 1;
for (auto dim : y_shape) y_size *= dim;
TIndex x_size = 1;
for (auto dim : x_shape) x_size *= dim;
int thread_num = GetThreadsNum(y_size);
int block_num = GetBlockNum(CUDA_GET_BLOCKS(y_size, thread_num));
cudaStream_t stream = ctx->cuda_stream();
UBroadcastUnaryDimEqualKernel<DType, OP><<<block_num, thread_num, 0, stream>>>
(y, y_size, x, x_size);
}
#ifndef INSTANTIATE_BROADCAST_UNARY
#define INSTANTIATE_BROADCAST_UNARY(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(T* y, \
const std::vector<TIndex>& y_shape, \
const T* x, \
const std::vector<TIndex>& x_shape, \
CUDAContext* ctx) { \
UBroadcastUnaryDimEqual<T, OP>(y, y_shape, x, x_shape, ctx); \
}
#endif
// INSTANTIATE Broadcast ASSIGN
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, float16)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, float)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastAssign, broadcast::Assign, double)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, float16)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, float)
INSTANTIATE_BROADCAST_UNARY(DimEqualBroadcastFMA, broadcast::Sum, double)
#undef INSTANTIATE_BROADCAST_UNARY
template <typename DType, typename OP>
__global__ void BatchedUBroadcastUnaryDimEqualKernel(DType* y, int batch_count, TIndex y_size,
const DType* x, TIndex x_size) {
TIndex total_y_size = batch_count * y_size;
CUDA_KERNEL_LOOP(index, total_y_size) {
int batch_index = index / y_size;
int batch_offset = index % y_size;
y[index] = OP::Map(x[batch_index * x_size + batch_offset % x_size], y[index]);
}
}
template <typename DType, typename OP>
static void BacthedUBroadcastUnaryDimEqual(DType* y,
int batch_count,
const std::vector<TIndex>& y_shape,
const DType* x,
const std::vector<TIndex>& x_shape,
CUDAContext* ctx) {
TIndex y_size = 1;
for (auto dim : y_shape) y_size *= dim;
TIndex x_size = 1;
for (auto dim : x_shape) x_size *= dim;
int thread_num = GetThreadsNum(y_size * batch_count);
int block_num = GetBlockNum(CUDA_GET_BLOCKS(y_size * batch_count, thread_num));
cudaStream_t stream = ctx->cuda_stream();
BatchedUBroadcastUnaryDimEqualKernel<DType, OP><<<block_num, thread_num, 0, stream>>>
(y, batch_count, y_size, x, x_size);
}
#ifndef INSTANTIATE_BATCHED_BROADCAST_UNARY
#define INSTANTIATE_BATCHED_BROADCAST_UNARY(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(T* y, \
int batch_count, \
const std::vector<TIndex>& y_shape, \
const T* x, \
const std::vector<TIndex>& x_shape, \
CUDAContext* ctx) { \
BacthedUBroadcastUnaryDimEqual<T, OP>(y, batch_count, y_shape, x, x_shape, ctx); \
}
#endif
// INSTANTIATE Batched Broadcast UNARY
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, float16)
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, float)
INSTANTIATE_BATCHED_BROADCAST_UNARY(DimEqualBatchedBroadcastAssign, broadcast::Assign, double)
#undef INSTANTIATE_BATCHED_BROADCAST_UNARY
// INSTANTIATE Broadcast GEMM
INSTANTIATE_BROADCAST_GEMM(float16, CUDAContext)
INSTANTIATE_BROADCAST_GEMM(float, CUDAContext)
INSTANTIATE_BROADCAST_GEMM(double, CUDAContext)
// INSTANTIATE Batched Broadcast GEMM
INSTANTIATE_BATCHED_BROADCAST_GEMM(float16, CUDAContext)
INSTANTIATE_BATCHED_BROADCAST_GEMM(float, CUDAContext)
INSTANTIATE_BATCHED_BROADCAST_GEMM(double, CUDAContext)
template <typename DType, typename OP>
static void BatchedBroadcastElementwise(const DType* a,
const std::vector<TIndex>& a_shape,
const DType* b,
const std::vector<TIndex>& b_shape,
DType* c,
const std::vector<TIndex>& c_shape,
int batch_count,
CUDAContext* ctx) {
std::vector<TIndex> ba_shape;
std::vector<TIndex> bb_shape;
std::vector<TIndex> bc_shape;
ba_shape.push_back(batch_count);
for (size_t k = 0; k < std::max(a_shape.size(), b_shape.size()) - a_shape.size(); ++k) {
ba_shape.push_back(1UL);
}
for (auto dim : a_shape) ba_shape.push_back(dim);
bb_shape.push_back(batch_count);
for (size_t k = 0; k < std::max(a_shape.size(), b_shape.size()) - b_shape.size(); ++k) {
bb_shape.push_back(1UL);
}
for (auto dim : b_shape) bb_shape.push_back(dim);
bc_shape.push_back(batch_count);
for (auto dim : c_shape) bc_shape.push_back(dim);
bool res = broadcast::BroadcastCompute<DType, OP, GpuKernelLauncher, CUDAContext>(a, ba_shape, b, bb_shape, c, bc_shape, *ctx);
BLAZE_CONDITION_THROW(res, "broadcast compute failed");
}
#ifndef INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE
#define INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(FuncName, OP, T) \
template <> \
void FuncName<T, CUDAContext>(const T* a, \
TIndex lda, \
const std::vector<TIndex>& a_shape, \
const T* b, \
TIndex ldb, \
const std::vector<TIndex>& b_shape, \
T* c, \
TIndex ldc, \
const std::vector<TIndex>& c_shape, \
int batch_count, \
CUDAContext* context) { \
BatchedBroadcastElementwise<T, OP>(a, a_shape, b, b_shape, c, c_shape, batch_count, context); \
}
#endif
// INSTANTIATE Batched Broadcast Mul
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, float16)
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, float)
INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE(BatchedBroadcastMul, broadcast::Mul, double)
#undef INSTANTIATE_BATCHED_BROADCAST_ELEMENTWISE
} // namespace blaze
|
cc7c2266f2e222076b33ee265f45a10d44bd0d97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
float *initarray(float *a, int N, float value) {
int i;
for (i=0; i<N; i++)
a[i] = drand48()*value;
return a;
}
void printarray(float *a, int N) {
int i;
for (i=0; i<N; i++) {
printf("%f ", a[i]);
printf("\n");
}
}
// Host code
int main(int argc, char **argv)
{
int N = atoi(argv[1]);
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
// Initialize input vectors
initarray(h_A, N, 10);
initarray(h_B, N, 10);
#ifdef DEBUG
printf("h_a:\n");
printarray(h_A, N);
printf("h_b:\n");
printarray(h_B, N);
#endif
// Allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(N + threadsPerBlock - 1) / threadsPerBlock;
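// Added note (illustrative): this is the usual ceiling division. With N = 1000,
// for example, the launch uses (1000 + 255) / 256 = 4 blocks, i.e. 1024
// threads, and the (i < N) guard in VecAdd leaves the last 24 idle.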
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
#ifdef DEBUG
// print result
printf("h_c:\n");
printarray(h_C, N);
#endif
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
}
| cc7c2266f2e222076b33ee265f45a10d44bd0d97.cu | #include <stdio.h>
#include <stdlib.h>
// Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
float *initarray(float *a, int N, float value) {
int i;
for (i=0; i<N; i++)
a[i] = drand48()*value;
return a;
}
void printarray(float *a, int N) {
int i;
for (i=0; i<N; i++) {
printf("%f ", a[i]);
printf("\n");
}
}
// Host code
int main(int argc, char **argv)
{
int N = atoi(argv[1]);
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
// Initialize input vectors
initarray(h_A, N, 10);
initarray(h_B, N, 10);
#ifdef DEBUG
printf("h_a:\n");
printarray(h_A, N);
printf("h_b:\n");
printarray(h_B, N);
#endif
// Allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
#ifdef DEBUG
// print result
printf("h_c:\n");
printarray(h_C, N);
#endif
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
baf5b0071ac90cf0fd3ddfbc46833c982f2214a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
__device__ void ixxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
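// Added layout note (an interpretation of the code above, not text from the
// generator): fi[0] and fi[1] pack -nsf times the four-momentum into two
// complex numbers ((E, pz) and (px, py)), while fi[2]..fi[5] hold the four
// spinor components. In the massless branch the two-component chi spinor goes
// into the lower pair (fi[4], fi[5]) when nh = nhel * nsf is +1 and into the
// upper pair (fi[2], fi[3]) when it is -1.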
__device__ void txxxxx(double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void FFV2_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * - cI * TMP0;
}
__device__ void FFV2_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0] * OM3 * TMP1);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] - P3[1] * OM3 *
TMP1);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5]) + cI * (F1[3] * F2[4]) - P3[2]
* OM3 * TMP1);
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - P3[3] * OM3 * TMP1 + F1[3] *
F2[5]);
}
__device__ void FFV4_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * (-1.) * (+cI * (TMP0) + 2. * cI * (TMP2));
}
__device__ void FFV4_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] + F1[5] *
F2[3]));
V3[3] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. * (TMP3)) +
(-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] + F1[5] *
F2[2]));
V3[4] = denom * 2. * cI * (OM3 * 1./2. * P3[2] * (TMP1 + 2. * (TMP3)) +
(+1./2. * cI * (F1[2] * F2[5]) - 1./2. * cI * (F1[3] * F2[4]) - cI *
(F1[4] * F2[3]) + cI * (F1[5] * F2[2])));
V3[5] = denom * 2. * cI * (OM3 * 1./2. * P3[3] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] * F2[2] +
F1[5] * F2[3]));
}
__device__ void FFV1_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP4;
TMP4 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP4;
}
__device__ void FFV1P0_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void FFV2_4_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP1, const thrust::complex<double> COUP2,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = (-1.) * (COUP2 * (+cI * (TMP0) + 2. * cI * (TMP2)) + cI * (TMP0 *
COUP1));
}
__device__ void FFV2_4_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP1, const
thrust::complex<double> COUP2, const double M3, const double W3,
thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = 1./((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] +
F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0]
* OM3 * TMP1)));
V3[3] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. *
(TMP3)) + (-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] +
F1[5] * F2[2])) - 1./2. * (COUP1 * (F1[2] * F2[5] + F1[3] * F2[4] + P3[1]
* OM3 * TMP1)));
V3[4] = denom * cI * (COUP2 * (OM3 * P3[2] * (TMP1 + 2. * (TMP3)) + (+cI *
(F1[2] * F2[5]) - cI * (F1[3] * F2[4]) - 2. * cI * (F1[4] * F2[3]) + 2. *
cI * (F1[5] * F2[2]))) + COUP1 * (+cI * (F1[2] * F2[5]) - cI * (F1[3] *
F2[4]) + P3[2] * OM3 * TMP1));
V3[5] = denom * 2. * cI * (COUP2 * (OM3 * 1./2. * P3[3] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] *
F2[2] + F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + P3[3] * OM3 *
TMP1 - F1[3] * F2[5])));
}
} // end namespace $(namespace)s_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
using namespace MG5_sm;
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: e+ e- > mu+ mu- WEIGHTED<=4 @1
__constant__ int cHel[16][4];
// __constant__ double cmME[4]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // independent couplings: three complex values (GC_3, GC_50, GC_59) copied in initProc
__constant__ double cIPD[2]; // independent parameters (mdl_MZ, mdl_WZ) copied in initProc
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[4][3],
double &matrix)
{
thrust::complex<double> amp[2];
// Calculate wavefunctions for all processes
thrust::complex<double> w[5][6];
oxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
ixxxxx(local_mom[1], 0., cHel[ihel][1], +1, w[1]);
ixxxxx(local_mom[2], 0., cHel[ihel][2], -1, w[2]);
oxxxxx(local_mom[3], 0., cHel[ihel][3], +1, w[3]);
FFV1P0_3(w[1], w[0], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[4]);
// Amplitude(s) for diagram number 1
FFV1_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[0]);
FFV2_4_3(w[1], w[0], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), cIPD[0], cIPD[1], w[4]);
// Amplitude(s) for diagram number 2
FFV2_4_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), &amp[1]);
// double CPPProcess::matrix_1_epem_mupmum() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 1;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {1};
static const double cf[ncolor][ncolor] = {{1}};
// Calculate color flows
jamp[0] = -amp[0] - amp[1];
// Sum and square the color flows to get the matrix element
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1}, {-1, -1, -1, 1},
{-1, -1, 1, -1}, {-1, -1, 1, 1}, {-1, 1, -1, -1}, {-1, 1, -1, 1}, {-1, 1,
1, -1}, {-1, 1, 1, 1}, {1, -1, -1, -1}, {1, -1, -1, 1}, {1, -1, 1, -1},
{1, -1, 1, 1}, {1, 1, -1, -1}, {1, 1, -1, 1}, {1, 1, 1, -1}, {1, 1, 1,
1}};
hipMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int));
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
pars->printIndependentParameters();
pars->printIndependentCouplings();
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_3, pars->GC_50,
pars->GC_59};
static double tIPD[2] = {pars->mdl_MZ, pars->mdl_WZ};
hipMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(thrust::complex<double> ));
hipMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(double));
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 4;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[2];
// __shared__ double local_m[4][3];
__shared__ double local_m[4][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
const int ncomb = 16;
// int mid = tid % 12;
// if (tid <12){
// int i = tid/4;
// int j = (tid/4)%3;
// local_m[i][j] = allmomenta[i * 3 * (DIM/ncomb) + j * (DIM/ncomb) + (tid/ncomb)];
// }
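// Gather this event's four external momenta into shared memory. The indexing below
// treats allmomenta as particle-major with nevt = DIM/ncomb events per component:
// index = ipar * 3 * nevt + icomp * nevt + ievt, where ievt = tid/ncomb, so every group
// of ncomb consecutive threads shares one event and each of those threads later
// evaluates a single helicity combination (ihel = tid % ncomb further below).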
for (int i = 0; i < 4; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * (DIM/ncomb) + j * (DIM/ncomb) + (tid/ncomb)];
//if (tid == 0){
// printf(" %f ", local_m[i][j]);
//}
}
// if (tid == 0){
// printf("\n");}
}
__syncthreads();
// Local variables and constants
// const int ncomb = 16;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1},{-1,-1,-1,1},{-1,-1,1,-1},{-1,-1,1,1},{-1,1,-1,-1},{-1,1,-1,
// 1},{-1,1,1,-1},{-1,1,1,1},{1,-1,-1,-1},{1,-1,-1,1},{1,-1,1,-1},{1,-1,1,1},{
// 1,1,-1,-1},{1,1,-1,1},{1,1,1,-1},{1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {4};
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
//for (int ihel = 0; ihel < ncomb; ihel++ )
//{
calculate_wavefunctions(tid % ncomb, local_m, matrix_element[0]);
//}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid, output[i*nprocesses+tid]);
}
}
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
| baf5b0071ac90cf0fd3ddfbc46833c982f2214a9.cu | //==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
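// HELAS-style helicity amplitude routines used by the generated matrix element below:
// ixxxxx/oxxxxx build incoming/outgoing fermion wavefunctions, vxxxxx/sxxxxx/txxxxx build
// vector/scalar/tensor wavefunctions, the FFV*_0 functions return a fermion-fermion-vector
// vertex amplitude, and the FFV*_3 functions return the corresponding off-shell vector current.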
__device__ void ixxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
__device__ void txxxxx(double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void FFV2_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * - cI * TMP0;
}
__device__ void FFV2_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0] * OM3 * TMP1);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] - P3[1] * OM3 *
TMP1);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5]) + cI * (F1[3] * F2[4]) - P3[2]
* OM3 * TMP1);
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - P3[3] * OM3 * TMP1 + F1[3] *
F2[5]);
}
__device__ void FFV4_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * (-1.) * (+cI * (TMP0) + 2. * cI * (TMP2));
}
__device__ void FFV4_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] + F1[5] *
F2[3]));
V3[3] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. * (TMP3)) +
(-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] + F1[5] *
F2[2]));
V3[4] = denom * 2. * cI * (OM3 * 1./2. * P3[2] * (TMP1 + 2. * (TMP3)) +
(+1./2. * cI * (F1[2] * F2[5]) - 1./2. * cI * (F1[3] * F2[4]) - cI *
(F1[4] * F2[3]) + cI * (F1[5] * F2[2])));
V3[5] = denom * 2. * cI * (OM3 * 1./2. * P3[3] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] * F2[2] +
F1[5] * F2[3]));
}
__device__ void FFV1_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP4;
TMP4 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP4;
}
__device__ void FFV1P0_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void FFV2_4_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP1, const thrust::complex<double> COUP2,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = (-1.) * (COUP2 * (+cI * (TMP0) + 2. * cI * (TMP2)) + cI * (TMP0 *
COUP1));
}
__device__ void FFV2_4_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP1, const
thrust::complex<double> COUP2, const double M3, const double W3,
thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = 1./((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] +
F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0]
* OM3 * TMP1)));
V3[3] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. *
(TMP3)) + (-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] +
F1[5] * F2[2])) - 1./2. * (COUP1 * (F1[2] * F2[5] + F1[3] * F2[4] + P3[1]
* OM3 * TMP1)));
V3[4] = denom * cI * (COUP2 * (OM3 * P3[2] * (TMP1 + 2. * (TMP3)) + (+cI *
(F1[2] * F2[5]) - cI * (F1[3] * F2[4]) - 2. * cI * (F1[4] * F2[3]) + 2. *
cI * (F1[5] * F2[2]))) + COUP1 * (+cI * (F1[2] * F2[5]) - cI * (F1[3] *
F2[4]) + P3[2] * OM3 * TMP1));
V3[5] = denom * 2. * cI * (COUP2 * (OM3 * 1./2. * P3[3] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] *
F2[2] + F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + P3[3] * OM3 *
TMP1 - F1[3] * F2[5])));
}
} // end namespace $(namespace)s_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
using namespace MG5_sm;
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: e+ e- > mu+ mu- WEIGHTED<=4 @1
__constant__ int cHel[16][4];
// __constant__ double cmME[4]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // independent couplings: three complex values (GC_3, GC_50, GC_59) copied in initProc
__constant__ double cIPD[2]; // independent parameters (mdl_MZ, mdl_WZ) copied in initProc
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[4][3],
double &matrix)
{
thrust::complex<double> amp[2];
// Calculate wavefunctions for all processes
thrust::complex<double> w[5][6];
oxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
ixxxxx(local_mom[1], 0., cHel[ihel][1], +1, w[1]);
ixxxxx(local_mom[2], 0., cHel[ihel][2], -1, w[2]);
oxxxxx(local_mom[3], 0., cHel[ihel][3], +1, w[3]);
FFV1P0_3(w[1], w[0], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[4]);
// Amplitude(s) for diagram number 1
FFV1_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[0]);
FFV2_4_3(w[1], w[0], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), cIPD[0], cIPD[1], w[4]);
// Amplitude(s) for diagram number 2
FFV2_4_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), &amp[1]);
// double CPPProcess::matrix_1_epem_mupmum() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 1;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {1};
static const double cf[ncolor][ncolor] = {{1}};
// Calculate color flows
jamp[0] = -amp[0] - amp[1];
// Sum and square the color flows to get the matrix element
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1}, {-1, -1, -1, 1},
{-1, -1, 1, -1}, {-1, -1, 1, 1}, {-1, 1, -1, -1}, {-1, 1, -1, 1}, {-1, 1,
1, -1}, {-1, 1, 1, 1}, {1, -1, -1, -1}, {1, -1, -1, 1}, {1, -1, 1, -1},
{1, -1, 1, 1}, {1, 1, -1, -1}, {1, 1, -1, 1}, {1, 1, 1, -1}, {1, 1, 1,
1}};
cudaMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int));
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
pars->printIndependentParameters();
pars->printIndependentCouplings();
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_3, pars->GC_50,
pars->GC_59};
static double tIPD[2] = {pars->mdl_MZ, pars->mdl_WZ};
cudaMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(thrust::complex<double> ));
cudaMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(double));
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 4;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[2];
// __shared__ double local_m[4][3];
__shared__ double local_m[4][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
const int ncomb = 16;
// int mid = tid % 12;
// if (tid <12){
// int i = tid/4;
// int j = (tid/4)%3;
// local_m[i][j] = allmomenta[i * 3 * (DIM/ncomb) + j * (DIM/ncomb) + (tid/ncomb)];
// }
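// Gather this event's four external momenta into shared memory. The indexing below
// treats allmomenta as particle-major with nevt = DIM/ncomb events per component:
// index = ipar * 3 * nevt + icomp * nevt + ievt, where ievt = tid/ncomb, so every group
// of ncomb consecutive threads shares one event and each of those threads later
// evaluates a single helicity combination (ihel = tid % ncomb further below).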
for (int i = 0; i < 4; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * (DIM/ncomb) + j * (DIM/ncomb) + (tid/ncomb)];
//if (tid == 0){
// printf(" %f ", local_m[i][j]);
//}
}
// if (tid == 0){
// printf("\n");}
}
__syncthreads();
// Local variables and constants
// const int ncomb = 16;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1},{-1,-1,-1,1},{-1,-1,1,-1},{-1,-1,1,1},{-1,1,-1,-1},{-1,1,-1,
// 1},{-1,1,1,-1},{-1,1,1,1},{1,-1,-1,-1},{1,-1,-1,1},{1,-1,1,-1},{1,-1,1,1},{
// 1,1,-1,-1},{1,1,-1,1},{1,1,1,-1},{1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {4};
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
//for (int ihel = 0; ihel < ncomb; ihel++ )
//{
calculate_wavefunctions(tid % ncomb, local_m, matrix_element[0]);
//}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid, output[i*nprocesses+tid]);
}
}
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
|
66320574c68be2612d822a08626086edc466eba4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2017 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cupti.h>
#define METRIC_NAME "ipc"
#define DRIVER_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
if(_status == CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED) \
exit(0); \
else \
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// User data for event collection callback
typedef struct MetricData_st {
// the device where metric is being collected
hipDevice_t device;
// the set of event groups to collect for a pass
CUpti_EventGroupSet *eventGroups;
// the current number of events collected in eventIdArray and
// eventValueArray
uint32_t eventIdx;
// the number of entries in eventIdArray and eventValueArray
uint32_t numEvents;
// array of event ids
CUpti_EventID *eventIdArray;
// array of event values
uint64_t *eventValueArray;
} MetricData_t;
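// Execution time (in ns) of the profiled kernel, recorded by the bufferCompleted
// activity callback below and later passed to cuptiMetricGetValue, since some metrics
// need the kernel duration as part of their calculation.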
static uint64_t kernelDuration;
// Device code
__global__ void VecAdd(const int* A, const int* B, int* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
static void
initVec(int *vec, int n)
{
for (int i=0; i< n; i++)
vec[i] = i;
}
void CUPTIAPI
getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
MetricData_t *metricData = (MetricData_t*)userdata;
unsigned int i, j, k;
// This callback is enabled only for launch so we shouldn't see
// anything else.
if ((cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) &&
(cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000))
{
printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid);
exit(-1);
}
// on entry, enable all the event groups being collected this pass,
// for metrics we collect for all instances of the event
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
hipDeviceSynchronize();
CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context,
CUPTI_EVENT_COLLECTION_MODE_KERNEL));
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
uint32_t all = 1;
CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(all), &all));
CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i]));
}
}
// on exit, read and record event values
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
hipDeviceSynchronize();
// for each group, read the event values from the group and record
// in metricData
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
CUpti_EventGroup group = metricData->eventGroups->eventGroups[i];
CUpti_EventDomainID groupDomain;
uint32_t numEvents, numInstances, numTotalInstances;
CUpti_EventID *eventIds;
size_t groupDomainSize = sizeof(groupDomain);
size_t numEventsSize = sizeof(numEvents);
size_t numInstancesSize = sizeof(numInstances);
size_t numTotalInstancesSize = sizeof(numTotalInstances);
uint64_t *values, normalized, *sum;
size_t valuesSize, eventIdsSize;
size_t numCountersRead = 0;
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&groupDomainSize, &groupDomain));
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize, &numTotalInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT,
&numInstancesSize, &numInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&numEventsSize, &numEvents));
eventIdsSize = numEvents * sizeof(CUpti_EventID);
eventIds = (CUpti_EventID *)malloc(eventIdsSize);
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENTS,
&eventIdsSize, eventIds));
valuesSize = sizeof(uint64_t) * numInstances * numEvents;
values = (uint64_t *)malloc(valuesSize);
CUPTI_CALL(cuptiEventGroupReadAllEvents(group,
CUPTI_EVENT_READ_FLAG_NONE,
&valuesSize,
values,
&eventIdsSize,
eventIds,
&numCountersRead));
if (metricData->eventIdx >= metricData->numEvents) {
fprintf(stderr, "error: too many events collected, metric expects only %d\n",
(int)metricData->numEvents);
exit(-1);
}
sum = (uint64_t *)calloc(sizeof(uint64_t), numEvents);
// sum the event values collected from all domain instances
for (k = 0; k < numInstances; k++) {
for (j = 0; j < numEvents; j++) {
sum[j] += values[(k * numEvents) + j];
}
}
for (j = 0; j < numEvents; j++) {
// normalize the event value to represent the total number of
// domain instances on the device
normalized = (sum[j] * numTotalInstances) / numInstances;
metricData->eventIdArray[metricData->eventIdx] = eventIds[j];
metricData->eventValueArray[metricData->eventIdx] = normalized;
metricData->eventIdx++;
// print collected value
{
char eventName[128];
size_t eventNameSize = sizeof(eventName) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME,
&eventNameSize, eventName));
eventName[127] = '\0';
printf("\t%s = %llu (", eventName, (unsigned long long)sum[j]);
if (numInstances > 1) {
for (k = 0; k < numInstances; k++) {
if (k != 0)
printf(", ");
printf("%llu", (unsigned long long)values[(k * numEvents) + j]);
}
}
printf(")\n");
printf("\t%s (normalized) (%llu * %u) / %u = %llu\n",
eventName, (unsigned long long)sum[j],
numTotalInstances, numInstances,
(unsigned long long)normalized);
}
}
free(values);
free(sum);
}
for (i = 0; i < metricData->eventGroups->numEventGroups; i++)
CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i]));
}
}
static void
cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C)
{
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
static void
runPass()
{
int N = 50000;
size_t size = N * sizeof(int);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
int i, sum;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Verify result
for (i = 0; i < N; ++i) {
sum = h_A[i] + h_B[i];
if (h_C[i] != sum) {
fprintf(stderr, "error: result verification failed\n");
exit(-1);
}
}
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
}
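// CUPTI activity API callbacks: bufferRequested hands CUPTI an aligned buffer for
// activity records, and bufferCompleted reads back the single expected kernel record
// and stores its execution time in kernelDuration.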
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
uint8_t *rawBuffer;
*size = 16 * 1024;
rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
*buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
*maxNumRecords = 0;
if (*buffer == NULL) {
printf("Error: out of memory\n");
exit(-1);
}
}
static void CUPTIAPI
bufferCompleted(hipCtx_t ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
CUpti_Activity *record = NULL;
CUpti_ActivityKernel4 *kernel;
//since we launched only 1 kernel, we should have only 1 kernel record
CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record));
kernel = (CUpti_ActivityKernel4 *)record;
if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) {
fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind);
exit(-1);
}
kernelDuration = kernel->end - kernel->start;
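// kernelDuration is later passed to cuptiMetricGetValue() in main() when computing the metric value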
free(buffer);
}
int
main(int argc, char *argv[])
{
CUpti_SubscriberHandle subscriber;
hipCtx_t context = 0;
hipDevice_t device = 0;
int deviceNum;
int deviceCount;
char deviceName[32];
const char *metricName;
CUpti_MetricID metricId;
CUpti_EventGroupSets *passData;
MetricData_t metricData;
unsigned int pass;
CUpti_MetricValue metricValue;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
// make sure activity is enabled before any CUDA API
CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL));
DRIVER_API_CALL(hipInit(0));
DRIVER_API_CALL(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
if (argc > 1)
deviceNum = atoi(argv[1]);
else
deviceNum = 0;
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(hipDeviceGet(&device, deviceNum));
DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device));
printf("CUDA Device Name: %s\n", deviceName);
DRIVER_API_CALL(hipCtxCreate(&context, 0, device));
// Get the name of the metric to collect
if (argc > 2)
metricName = argv[2];
else {
metricName = METRIC_NAME;
}
// need to collect duration of kernel execution without any event
// collection enabled (some metrics need kernel duration as part of
// calculation). The only accurate way to do this is by using the
// activity API.
{
CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
runPass();
hipDeviceSynchronize();
CUPTI_CALL(cuptiActivityFlushAll(0));
}
// setup launch callback for event collection
CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000));
// allocate space to hold all the events needed for the metric
CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId));
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents));
metricData.device = device;
metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID));
metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t));
metricData.eventIdx = 0;
// get the number of passes required to collect all the events
// needed for the metric and the event groups for each pass
CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData));
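// each pass re-launches the kernel via runPass(); the launch callback enables and reads that pass's event groups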
for (pass = 0; pass < passData->numSets; pass++) {
printf("Pass %u\n", pass);
metricData.eventGroups = passData->sets + pass;
runPass();
}
if (metricData.eventIdx != metricData.numEvents) {
fprintf(stderr, "error: expected %u metric events, got %u\n",
metricData.numEvents, metricData.eventIdx);
exit(-1);
}
// use all the collected events to calculate the metric value
CUPTI_CALL(cuptiMetricGetValue(device, metricId,
metricData.numEvents * sizeof(CUpti_EventID),
metricData.eventIdArray,
metricData.numEvents * sizeof(uint64_t),
metricData.eventValueArray,
kernelDuration, &metricValue));
// print metric value, we format based on the value kind
{
CUpti_MetricValueKind valueKind;
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("Metric %s = %llu\n", metricName,
(unsigned long long)metricValue.metricValueUint64);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("Metric %s = %lld\n", metricName,
(long long)metricValue.metricValueInt64);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("Metric %s = %llu bytes/sec\n", metricName,
(unsigned long long)metricValue.metricValueThroughput);
break;
case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
printf("Metric %s = utilization level %u\n", metricName,
(unsigned int)metricValue.metricValueUtilizationLevel);
break;
default:
fprintf(stderr, "error: unknown value kind\n");
exit(-1);
}
}
CUPTI_CALL(cuptiUnsubscribe(subscriber));
return 0;
}
| 66320574c68be2612d822a08626086edc466eba4.cu | /*
* Copyright 2011-2017 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <cuda.h>
#include <cupti.h>
#define METRIC_NAME "ipc"
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
if(_status == CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED) \
exit(0); \
else \
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// User data for event collection callback
typedef struct MetricData_st {
// the device where metric is being collected
CUdevice device;
// the set of event groups to collect for a pass
CUpti_EventGroupSet *eventGroups;
// the current number of events collected in eventIdArray and
// eventValueArray
uint32_t eventIdx;
// the number of entries in eventIdArray and eventValueArray
uint32_t numEvents;
// array of event ids
CUpti_EventID *eventIdArray;
// array of event values
uint64_t *eventValueArray;
} MetricData_t;
static uint64_t kernelDuration;
// Device code
__global__ void VecAdd(const int* A, const int* B, int* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
static void
initVec(int *vec, int n)
{
for (int i=0; i< n; i++)
vec[i] = i;
}
void CUPTIAPI
getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
MetricData_t *metricData = (MetricData_t*)userdata;
unsigned int i, j, k;
// This callback is enabled only for launch so we shouldn't see
// anything else.
if ((cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) &&
(cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000))
{
printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid);
exit(-1);
}
// on entry, enable all the event groups being collected this pass,
// for metrics we collect for all instances of the event
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
cudaDeviceSynchronize();
CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context,
CUPTI_EVENT_COLLECTION_MODE_KERNEL));
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
uint32_t all = 1;
CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(all), &all));
CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i]));
}
}
// on exit, read and record event values
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
cudaDeviceSynchronize();
// for each group, read the event values from the group and record
// in metricData
for (i = 0; i < metricData->eventGroups->numEventGroups; i++) {
CUpti_EventGroup group = metricData->eventGroups->eventGroups[i];
CUpti_EventDomainID groupDomain;
uint32_t numEvents, numInstances, numTotalInstances;
CUpti_EventID *eventIds;
size_t groupDomainSize = sizeof(groupDomain);
size_t numEventsSize = sizeof(numEvents);
size_t numInstancesSize = sizeof(numInstances);
size_t numTotalInstancesSize = sizeof(numTotalInstances);
uint64_t *values, normalized, *sum;
size_t valuesSize, eventIdsSize;
size_t numCountersRead = 0;
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&groupDomainSize, &groupDomain));
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize, &numTotalInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT,
&numInstancesSize, &numInstances));
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&numEventsSize, &numEvents));
eventIdsSize = numEvents * sizeof(CUpti_EventID);
eventIds = (CUpti_EventID *)malloc(eventIdsSize);
CUPTI_CALL(cuptiEventGroupGetAttribute(group,
CUPTI_EVENT_GROUP_ATTR_EVENTS,
&eventIdsSize, eventIds));
valuesSize = sizeof(uint64_t) * numInstances * numEvents;
values = (uint64_t *)malloc(valuesSize);
CUPTI_CALL(cuptiEventGroupReadAllEvents(group,
CUPTI_EVENT_READ_FLAG_NONE,
&valuesSize,
values,
&eventIdsSize,
eventIds,
&numCountersRead));
if (metricData->eventIdx >= metricData->numEvents) {
fprintf(stderr, "error: too many events collected, metric expects only %d\n",
(int)metricData->numEvents);
exit(-1);
}
sum = (uint64_t *)calloc(sizeof(uint64_t), numEvents);
// accumulate the event values from all instances into sum
for (k = 0; k < numInstances; k++) {
for (j = 0; j < numEvents; j++) {
sum[j] += values[(k * numEvents) + j];
}
}
for (j = 0; j < numEvents; j++) {
// normalize the event value to represent the total number of
// domain instances on the device
normalized = (sum[j] * numTotalInstances) / numInstances;
metricData->eventIdArray[metricData->eventIdx] = eventIds[j];
metricData->eventValueArray[metricData->eventIdx] = normalized;
metricData->eventIdx++;
// print collected value
{
char eventName[128];
size_t eventNameSize = sizeof(eventName) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME,
&eventNameSize, eventName));
eventName[127] = '\0';
printf("\t%s = %llu (", eventName, (unsigned long long)sum[j]);
if (numInstances > 1) {
for (k = 0; k < numInstances; k++) {
if (k != 0)
printf(", ");
printf("%llu", (unsigned long long)values[(k * numEvents) + j]);
}
}
printf(")\n");
printf("\t%s (normalized) (%llu * %u) / %u = %llu\n",
eventName, (unsigned long long)sum[j],
numTotalInstances, numInstances,
(unsigned long long)normalized);
}
}
free(values);
free(sum);
}
for (i = 0; i < metricData->eventGroups->numEventGroups; i++)
CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i]));
}
}
static void
cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C)
{
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
static void
runPass()
{
int N = 50000;
size_t size = N * sizeof(int);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
int i, sum;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Verify result
for (i = 0; i < N; ++i) {
sum = h_A[i] + h_B[i];
if (h_C[i] != sum) {
fprintf(stderr, "error: result verification failed\n");
exit(-1);
}
}
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
}
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
uint8_t *rawBuffer;
*size = 16 * 1024;
rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
*buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
*maxNumRecords = 0;
if (*buffer == NULL) {
printf("Error: out of memory\n");
exit(-1);
}
}
static void CUPTIAPI
bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
CUpti_Activity *record = NULL;
CUpti_ActivityKernel4 *kernel;
//since we launched only 1 kernel, we should have only 1 kernel record
CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record));
kernel = (CUpti_ActivityKernel4 *)record;
if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) {
fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind);
exit(-1);
}
kernelDuration = kernel->end - kernel->start;
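// kernelDuration is later passed to cuptiMetricGetValue() in main() when computing the metric value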
free(buffer);
}
int
main(int argc, char *argv[])
{
CUpti_SubscriberHandle subscriber;
CUcontext context = 0;
CUdevice device = 0;
int deviceNum;
int deviceCount;
char deviceName[32];
const char *metricName;
CUpti_MetricID metricId;
CUpti_EventGroupSets *passData;
MetricData_t metricData;
unsigned int pass;
CUpti_MetricValue metricValue;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
// make sure activity is enabled before any CUDA API
CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL));
DRIVER_API_CALL(cuInit(0));
DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
if (argc > 1)
deviceNum = atoi(argv[1]);
else
deviceNum = 0;
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
printf("CUDA Device Name: %s\n", deviceName);
DRIVER_API_CALL(cuCtxCreate(&context, 0, device));
// Get the name of the metric to collect
if (argc > 2)
metricName = argv[2];
else {
metricName = METRIC_NAME;
}
// need to collect duration of kernel execution without any event
// collection enabled (some metrics need kernel duration as part of
// calculation). The only accurate way to do this is by using the
// activity API.
{
CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
runPass();
cudaDeviceSynchronize();
CUPTI_CALL(cuptiActivityFlushAll(0));
}
// setup launch callback for event collection
CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020));
CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000));
// allocate space to hold all the events needed for the metric
CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId));
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents));
metricData.device = device;
metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID));
metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t));
metricData.eventIdx = 0;
// get the number of passes required to collect all the events
// needed for the metric and the event groups for each pass
CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData));
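// each pass re-launches the kernel via runPass(); the launch callback enables and reads that pass's event groups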
for (pass = 0; pass < passData->numSets; pass++) {
printf("Pass %u\n", pass);
metricData.eventGroups = passData->sets + pass;
runPass();
}
if (metricData.eventIdx != metricData.numEvents) {
fprintf(stderr, "error: expected %u metric events, got %u\n",
metricData.numEvents, metricData.eventIdx);
exit(-1);
}
// use all the collected events to calculate the metric value
CUPTI_CALL(cuptiMetricGetValue(device, metricId,
metricData.numEvents * sizeof(CUpti_EventID),
metricData.eventIdArray,
metricData.numEvents * sizeof(uint64_t),
metricData.eventValueArray,
kernelDuration, &metricValue));
// print metric value, we format based on the value kind
{
CUpti_MetricValueKind valueKind;
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("Metric %s = %llu\n", metricName,
(unsigned long long)metricValue.metricValueUint64);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("Metric %s = %lld\n", metricName,
(long long)metricValue.metricValueInt64);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("Metric %s = %llu bytes/sec\n", metricName,
(unsigned long long)metricValue.metricValueThroughput);
break;
case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
printf("Metric %s = utilization level %u\n", metricName,
(unsigned int)metricValue.metricValueUtilizationLevel);
break;
default:
fprintf(stderr, "error: unknown value kind\n");
exit(-1);
}
}
CUPTI_CALL(cuptiUnsubscribe(subscriber));
return 0;
}
|
c5ffef92ded676b08db8fdc32b248fce63fb6149.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MAXKERNEL__CU__
#define __MAXKERNEL__CU__
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include "config.h"
/*
* Performs a row-wise max reduction
*
* in - the M x N input matrix to be reduced
* out - output array receiving the per-row results
* M, N - matrix dimensions
*/
template <class T>
__global__ void maxRowsKernel(T * in, T * out, unsigned int M, unsigned int N){
extern __shared__ __align__(sizeof(T)) unsigned char my_smem[];
T * sdata= reinterpret_cast<T * >(my_smem);
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
int ty = threadIdx.y, tx = threadIdx.x, width = blockDim.x < N ? blockDim.x : N;
//collaboratively load given array into shared memory
//and synchronize
int arrayPosition = row * N + column;
sdata[ty * blockDim.x + tx] = (row < M && column < N) ? in[arrayPosition]: 0;
// if(width % 2 != 0)
// width++;
__syncthreads();
// if(row == 0 && column == 0)
// printf("s: %f\n", ceilf(width / 2.0f));
// //now we need to do the actual reduction.
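// tree reduction across the row tile: each iteration halves the active threads in x and keeps the larger candidate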
for(unsigned int s = (width + 1)/2; s > 0; s >>= 1){
if(tx < s){
if(sdata[ty * blockDim.x + tx] < sdata[ty * blockDim.x + tx + s])
sdata[ty * blockDim.x + tx] = sdata[ty * blockDim.x + tx + s];
}
__syncthreads();
}
//store result in out array
if(tx == 0){
out[row * N / width + blockIdx.x] = sdata[ty * blockDim.x];
}
}
template <class T, unsigned int BLOCK_SIZE>
__global__ void maxColsKernel(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
int ty = threadIdx.y, tx = threadIdx.x, width = BLOCK_SIZE < N ? BLOCK_SIZE : N;
sdata[ty][tx] = (row < M && column < N) ? in[row * N + column]: 0;
__syncthreads();
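// column-wise tree reduction within the tile; after the loop sdata[0][tx] holds the tile maximum for this column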
for(unsigned int s = BLOCK_SIZE/2; s > 0; s >>= 1){
if(ty < s){
sdata[ty][tx] = sdata[ty][tx] < sdata[ty + s][tx] ? sdata[ty + s][tx]: sdata[ty][tx];
}
__syncthreads();
}
//store result in out array
if(ty == 0 && column < N){
//printf("writing out[%d] + sdata[%d][%d]\n, ", blockIdx.y * N + column, 0, tx);
out[blockIdx.y * N + column] = sdata[0][tx];
}
}
template <class T>
void maxCols(T * in, T * out, int M, int N, int threadsPerBlock){
int nBlocksXPrimary = ceil(N/(float)THREADS_PER_BLOCK);
dim3 grid(nBlocksXPrimary, ceil(M/(float)THREADS_PER_BLOCK), 1);
dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 gridSecondary(ceil(nBlocksXPrimary/(float)THREADS_PER_BLOCK), ceil(M/(float)THREADS_PER_BLOCK), 1);
//first launch kernel to perform initial reduce
if(ceil(M/(float)THREADS_PER_BLOCK) < 0){
T * tmp;
hipMalloc((void **) &tmp, ceil(N/(float)THREADS_PER_BLOCK) * sizeof(T));
hipLaunchKernelGGL(( maxColsKernel<T, THREADS_PER_BLOCK>), dim3(grid), dim3(block), 0, 0, in, tmp, M, N);
// hipError_t cudaerr = hipDeviceSynchronize();
// if (cudaerr != hipSuccess)
// printf("\033[1;31m maxColsKernel launch failed with error \"%s\". \033[0m\n",
// hipGetErrorString(cudaerr));
// //printf("n = %d\n", (int)ceil(N/(float)THREADS_PER_BLOCK));
hipLaunchKernelGGL(( maxColsKernel<T, THREADS_PER_BLOCK>), dim3(gridSecondary), dim3(block), 0, 0, tmp, out, M, ceil(N/(float)THREADS_PER_BLOCK));
//printf("launching 2ndary\n");
hipFree(tmp);
}else{
//printf("n = %d\n", (int)ceil(N/(float)THREADS_PER_BLOCK));
hipLaunchKernelGGL(( maxColsKernel<T, THREADS_PER_BLOCK>), dim3(grid), dim3(block), 0, 0, in, out, M, N);
// hipError_t cudaerr = hipDeviceSynchronize();
// if (cudaerr != hipSuccess)
// printf("\033[1;31m rowSumsKernel launch failed with error \"%s\". \033[0m\n",
// hipGetErrorString(cudaerr));
// //printf("not launching 2ndary\n");
}
}
/*
* host code wrapper for maxRowsKernel
*
*/
template <class T>
void maxRows(T * in, T * out, int M, int N, int threadsPerBlock){
dim3 grid(ceil(N/(float)threadsPerBlock), ceil(M/(float)threadsPerBlock), 1);
dim3 block(threadsPerBlock, threadsPerBlock, 1);
//first launch kernel to perform initial reduce
hipLaunchKernelGGL(( maxRowsKernel<T>), dim3(grid), dim3(block), threadsPerBlock * threadsPerBlock * sizeof(T), 0, in, out, M, N);
// hipError_t cudaerr = hipDeviceSynchronize();
// if (cudaerr != hipSuccess)
// printf("\033[1;31mmaxRowsKernel launch failed with error \"%s\". \033[0m\n",
// hipGetErrorString(cudaerr));
}
template void
maxRows<int>(int * in, int * out, int M, int N, int threadsPerBlock);
template void
maxRows<float>(float * in, float * out, int M, int N, int threadsPerBlock);
template void
maxRows<double>(double * in, double * out, int M, int N, int threadsPerBlock);
template void
maxCols<int>(int * in, int * out, int M, int N, int threadsPerBlock);
template void
maxCols<float>(float * in, float * out, int M, int N, int threadsPerBlock);
template void
maxCols<double>(double * in, double * out, int M, int N, int threadsPerBlock);
#endif | c5ffef92ded676b08db8fdc32b248fce63fb6149.cu | #ifndef __MAXKERNEL__CU__
#define __MAXKERNEL__CU__
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include "config.h"
/*
* Performs a row-wise max reduction
*
* in - the M x N input matrix to be reduced
* out - output array receiving the per-row results
* M, N - matrix dimensions
*/
template <class T>
__global__ void maxRowsKernel(T * in, T * out, unsigned int M, unsigned int N){
extern __shared__ __align__(sizeof(T)) unsigned char my_smem[];
T * sdata= reinterpret_cast<T * >(my_smem);
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
int ty = threadIdx.y, tx = threadIdx.x, width = blockDim.x < N ? blockDim.x : N;
//collaboratively load given array into shared memory
//and synchronize
int arrayPosition = row * N + column;
sdata[ty * blockDim.x + tx] = (row < M && column < N) ? in[arrayPosition]: 0;
// if(width % 2 != 0)
// width++;
__syncthreads();
// if(row == 0 && column == 0)
// printf("s: %f\n", ceilf(width / 2.0f));
// //now we need to do the actual reduction.
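// tree reduction across the row tile: each iteration halves the active threads in x and keeps the larger candidate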
for(unsigned int s = (width + 1)/2; s > 0; s >>= 1){
if(tx < s){
if(sdata[ty * blockDim.x + tx] < sdata[ty * blockDim.x + tx + s])
sdata[ty * blockDim.x + tx] = sdata[ty * blockDim.x + tx + s];
}
__syncthreads();
}
//store result in out array
if(tx == 0){
out[row * N / width + blockIdx.x] = sdata[ty * blockDim.x];
}
}
template <class T, unsigned int BLOCK_SIZE>
__global__ void maxColsKernel(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
int ty = threadIdx.y, tx = threadIdx.x, width = BLOCK_SIZE < N ? BLOCK_SIZE : N;
sdata[ty][tx] = (row < M && column < N) ? in[row * N + column]: 0;
__syncthreads();
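// column-wise tree reduction within the tile; after the loop sdata[0][tx] holds the tile maximum for this column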
for(unsigned int s = BLOCK_SIZE/2; s > 0; s >>= 1){
if(ty < s){
sdata[ty][tx] = sdata[ty][tx] < sdata[ty + s][tx] ? sdata[ty + s][tx]: sdata[ty][tx];
}
__syncthreads();
}
//store result in out array
if(ty == 0 && column < N){
//printf("writing out[%d] + sdata[%d][%d]\n, ", blockIdx.y * N + column, 0, tx);
out[blockIdx.y * N + column] = sdata[0][tx];
}
}
template <class T>
void maxCols(T * in, T * out, int M, int N, int threadsPerBlock){
int nBlocksXPrimary = ceil(N/(float)THREADS_PER_BLOCK);
dim3 grid(nBlocksXPrimary, ceil(M/(float)THREADS_PER_BLOCK), 1);
dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 gridSecondary(ceil(nBlocksXPrimary/(float)THREADS_PER_BLOCK), ceil(M/(float)THREADS_PER_BLOCK), 1);
//first launch kernel to perform initial reduce
if(ceil(M/(float)THREADS_PER_BLOCK) < 0){
T * tmp;
cudaMalloc((void **) &tmp, ceil(N/(float)THREADS_PER_BLOCK) * sizeof(T));
maxColsKernel<T, THREADS_PER_BLOCK><<<grid, block>>>(in, tmp, M, N);
// cudaError_t cudaerr = cudaDeviceSynchronize();
// if (cudaerr != CUDA_SUCCESS)
// printf("\033[1;31m maxColsKernel launch failed with error \"%s\". \033[0m\n",
// cudaGetErrorString(cudaerr));
// //printf("n = %d\n", (int)ceil(N/(float)THREADS_PER_BLOCK));
maxColsKernel<T, THREADS_PER_BLOCK><<<gridSecondary, block>>>(tmp, out, M, ceil(N/(float)THREADS_PER_BLOCK));
//printf("launching 2ndary\n");
cudaFree(tmp);
}else{
//printf("n = %d\n", (int)ceil(N/(float)THREADS_PER_BLOCK));
maxColsKernel<T, THREADS_PER_BLOCK><<<grid, block>>>(in, out, M, N);
// cudaError_t cudaerr = cudaDeviceSynchronize();
// if (cudaerr != CUDA_SUCCESS)
// printf("\033[1;31m rowSumsKernel launch failed with error \"%s\". \033[0m\n",
// cudaGetErrorString(cudaerr));
// //printf("not launching 2ndary\n");
}
}
/*
* host code wrapper for maxRowsKernel
*
*/
template <class T>
void maxRows(T * in, T * out, int M, int N, int threadsPerBlock){
dim3 grid(ceil(N/(float)threadsPerBlock), ceil(M/(float)threadsPerBlock), 1);
dim3 block(threadsPerBlock, threadsPerBlock, 1);
//first launch kernel to perform initial reduce
maxRowsKernel<T><<<grid, block, threadsPerBlock * threadsPerBlock * sizeof(T)>>>(in, out, M, N);
// cudaError_t cudaerr = cudaDeviceSynchronize();
// if (cudaerr != CUDA_SUCCESS)
// printf("\033[1;31mmaxRowsKernel launch failed with error \"%s\". \033[0m\n",
// cudaGetErrorString(cudaerr));
}
template void
maxRows<int>(int * in, int * out, int M, int N, int threadsPerBlock);
template void
maxRows<float>(float * in, float * out, int M, int N, int threadsPerBlock);
template void
maxRows<double>(double * in, double * out, int M, int N, int threadsPerBlock);
template void
maxCols<int>(int * in, int * out, int M, int N, int threadsPerBlock);
template void
maxCols<float>(float * in, float * out, int M, int N, int threadsPerBlock);
template void
maxCols<double>(double * in, double * out, int M, int N, int threadsPerBlock);
#endif |
7ef809704a770f3de476932f5bdab2b6bb588946.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/kernel/random_generator.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; }
}
template<typename T>
__global__ void MaskAndScaleAddGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
const T* addend, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale + addend[i]; }
}
template<>
__global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
const int64_t h2_n = n / 2;
half2 h2_scale = __float2half2_rn(scale);
const auto* x_h2 = reinterpret_cast<const half2*>(x);
const auto* mask_c2 = reinterpret_cast<const char2*>(mask);
auto* y_h2 = reinterpret_cast<half2*>(y);
CUDA_1D_KERNEL_LOOP(i, h2_n) {
char2 mask_val = mask_c2[i];
half2 one_or_zero_h2;
one_or_zero_h2.x = mask_val.x;
one_or_zero_h2.y = mask_val.y;
y_h2[i] = __hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale);
}
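// when n is odd, the half2 loop above leaves one trailing element; a single thread handles it below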
if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) {
const int64_t last_idx = n - 1;
half one_or_zero = mask[last_idx];
y[last_idx] = __hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x);
}
}
template<>
__global__ void MaskAndScaleAddGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, const half* addend, half* y) {
const int64_t h2_n = n / 2;
half2 h2_scale = __float2half2_rn(scale);
const auto* x_h2 = reinterpret_cast<const half2*>(x);
const auto* addend_h2 = reinterpret_cast<const half2*>(addend);
const auto* mask_c2 = reinterpret_cast<const char2*>(mask);
auto* y_h2 = reinterpret_cast<half2*>(y);
CUDA_1D_KERNEL_LOOP(i, h2_n) {
char2 mask_val = mask_c2[i];
half2 one_or_zero_h2;
one_or_zero_h2.x = mask_val.x;
one_or_zero_h2.y = mask_val.y;
y_h2[i] = __hadd2(__hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale), addend_h2[i]);
}
if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) {
const int64_t last_idx = n - 1;
half one_or_zero = mask[last_idx];
y[last_idx] = __hadd(__hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x), addend[last_idx]);
}
}
template<typename T>
void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
hipLaunchKernelGGL(( MaskAndScaleGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, x, mask, y);
}
template<>
void MaskAndScale<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
hipLaunchKernelGGL(( MaskAndScaleGpu<half>)
, dim3(BlocksNum4ThreadsNum(RoundUp(n, 2) / 2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, x, mask, y);
}
template<typename T>
void MaskAndScaleAdd(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
const T* addend, T* y) {
hipLaunchKernelGGL(( MaskAndScaleAddGpu<T>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, x, mask, addend, y);
}
template<>
void MaskAndScaleAdd<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x,
const int8_t* mask, const half* addend, half* y) {
hipLaunchKernelGGL(( MaskAndScaleAddGpu<half>)
, dim3(BlocksNum4ThreadsNum(RoundUp(n, 2) / 2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, x, mask, addend, y);
}
template<typename T>
class DropoutKernelGPU final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
DropoutKernelGPU() = default;
~DropoutKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const float scale = ctx->Attr<float>("scale");
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* addend = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
MaskAndScaleAdd<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), addend->dptr<T>(), out->mut_dptr<T>());
} else {
MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), out->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout").SetCreateFn<DropoutKernelGPU<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_DROPOUT_KERNEL_GPU(half)
REGISTER_DROPOUT_KERNEL_GPU(float)
REGISTER_DROPOUT_KERNEL_GPU(double)
template<typename T>
class DropoutGradKernelGPU final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
DropoutGradKernelGPU() = default;
~DropoutGradKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(),
mask->dptr<int8_t>(), dx->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout_grad") \
.SetCreateFn<DropoutGradKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_GRAD_KERNEL_GPU(half)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(double)
} // namespace
} // namespace oneflow
| 7ef809704a770f3de476932f5bdab2b6bb588946.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/kernel/random_generator.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; }
}
template<typename T>
__global__ void MaskAndScaleAddGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
const T* addend, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale + addend[i]; }
}
template<>
__global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
const int64_t h2_n = n / 2;
half2 h2_scale = __float2half2_rn(scale);
const auto* x_h2 = reinterpret_cast<const half2*>(x);
const auto* mask_c2 = reinterpret_cast<const char2*>(mask);
auto* y_h2 = reinterpret_cast<half2*>(y);
CUDA_1D_KERNEL_LOOP(i, h2_n) {
char2 mask_val = mask_c2[i];
half2 one_or_zero_h2;
one_or_zero_h2.x = mask_val.x;
one_or_zero_h2.y = mask_val.y;
y_h2[i] = __hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale);
}
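// when n is odd, the half2 loop above leaves one trailing element; a single thread handles it below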
if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) {
const int64_t last_idx = n - 1;
half one_or_zero = mask[last_idx];
y[last_idx] = __hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x);
}
}
template<>
__global__ void MaskAndScaleAddGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, const half* addend, half* y) {
const int64_t h2_n = n / 2;
half2 h2_scale = __float2half2_rn(scale);
const auto* x_h2 = reinterpret_cast<const half2*>(x);
const auto* addend_h2 = reinterpret_cast<const half2*>(addend);
const auto* mask_c2 = reinterpret_cast<const char2*>(mask);
auto* y_h2 = reinterpret_cast<half2*>(y);
CUDA_1D_KERNEL_LOOP(i, h2_n) {
char2 mask_val = mask_c2[i];
half2 one_or_zero_h2;
one_or_zero_h2.x = mask_val.x;
one_or_zero_h2.y = mask_val.y;
y_h2[i] = __hadd2(__hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale), addend_h2[i]);
}
if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) {
const int64_t last_idx = n - 1;
half one_or_zero = mask[last_idx];
y[last_idx] = __hadd(__hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x), addend[last_idx]);
}
}
template<typename T>
void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
MaskAndScaleGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, x, mask, y);
}
template<>
void MaskAndScale<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
MaskAndScaleGpu<half>
<<<BlocksNum4ThreadsNum(RoundUp(n, 2) / 2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, x, mask, y);
}
template<typename T>
void MaskAndScaleAdd(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
const T* addend, T* y) {
MaskAndScaleAddGpu<T>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, x, mask, addend, y);
}
template<>
void MaskAndScaleAdd<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x,
const int8_t* mask, const half* addend, half* y) {
MaskAndScaleAddGpu<half>
<<<BlocksNum4ThreadsNum(RoundUp(n, 2) / 2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, x, mask, addend, y);
}
template<typename T>
class DropoutKernelGPU final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
DropoutKernelGPU() = default;
~DropoutKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const float scale = ctx->Attr<float>("scale");
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* addend = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
MaskAndScaleAdd<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), addend->dptr<T>(), out->mut_dptr<T>());
} else {
MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), out->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout").SetCreateFn<DropoutKernelGPU<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_DROPOUT_KERNEL_GPU(half)
REGISTER_DROPOUT_KERNEL_GPU(float)
REGISTER_DROPOUT_KERNEL_GPU(double)
template<typename T>
class DropoutGradKernelGPU final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
DropoutGradKernelGPU() = default;
~DropoutGradKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(),
mask->dptr<int8_t>(), dx->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout_grad") \
.SetCreateFn<DropoutGradKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_GRAD_KERNEL_GPU(half)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(double)
} // namespace
} // namespace oneflow
|
6dc38fa9c7e3e85003545c16b76e16f777ce907d.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#define CHECK(cmd) \
{\
hipError_t error = cmd;\
if (error != hipSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", hipGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x ;
//printf("offset: %d\n",offset);
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
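// the rest of this kernel is a timing/synchronization experiment and does not modify C_d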
// __SYNC_ACROSS_BLOCKS
if(offset == 0)
printf("I am from thread 0\n");
else if(offset == 40)
printf("I am from thread 40 \n");
__syncthreads();
if(offset==40){
//__syncthreads();
unsigned long long int wait_t=3200000000,start=clock64(),cur;
do{cur=clock64()-start;}
while(cur<wait_t);
printf("Wait is over!\n");
}
if(offset == 0)
printf("I am after grid.sync() from thread 0\n");
else if(offset == 40)
printf("I am after grid.sync() from thread 40 \n");
/*// BLOCKING_SCENARIO
if(offset < 16){
__syncthreads();
}
__syncthreads();
*/
}
int main(int argc, char *argv[])
{
CHECK(hipSetDevice(2));
float *A_d, *C_d;
float *A_h, *C_h;
//size_t N = 1000000;
size_t N = 64;
size_t Nbytes = N * sizeof(float);
hipDeviceProp_t props;
CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
C_h = (float*)malloc(Nbytes);
CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
CHECK(hipMalloc(&A_d, Nbytes));
CHECK(hipMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
//const unsigned blocks = 512;
//const unsigned threadsPerBlock = 256;
//const unsigned blocks = (N+31)/32;
//const unsigned blocks = 64;
const unsigned threadsPerBlock = 32;
const unsigned blocks = N/threadsPerBlock;
printf ("info: launch 'vector_square' kernel\n");
hipLaunchKernelGGL(( vector_square) , dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
printf ("info: copy Device2Host\n");
CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
CHECK(hipErrorUnknown);
}
}
printf ("PASSED!\n");
}
| 6dc38fa9c7e3e85003545c16b76e16f777ce907d.cu | /*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x ;
//printf("offset: %d\n",offset);
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
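// the rest of this kernel is a timing/synchronization experiment and does not modify C_d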
// __SYNC_ACROSS_BLOCKS
if(offset == 0)
printf("I am from thread 0\n");
else if(offset == 40)
printf("I am from thread 40 \n");
__syncthreads();
if(offset==40){
//__syncthreads();
unsigned long long int wait_t=3200000000,start=clock64(),cur;
do{cur=clock64()-start;}
while(cur<wait_t);
printf("Wait is over!\n");
}
if(offset == 0)
printf("I am after grid.sync() from thread 0\n");
else if(offset == 40)
printf("I am after grid.sync() from thread 40 \n");
/*// BLOCKING_SCENARIO
if(offset < 16){
__syncthreads();
}
__syncthreads();
*/
}
int main(int argc, char *argv[])
{
CHECK(cudaSetDevice(2));
float *A_d, *C_d;
float *A_h, *C_h;
//size_t N = 1000000;
size_t N = 64;
size_t Nbytes = N * sizeof(float);
cudaDeviceProp props;
CHECK(cudaGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
CHECK(A_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
C_h = (float*)malloc(Nbytes);
CHECK(C_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
CHECK(cudaMalloc(&A_d, Nbytes));
CHECK(cudaMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
CHECK ( cudaMemcpy(A_d, A_h, Nbytes, cudaMemcpyHostToDevice));
//const unsigned blocks = 512;
//const unsigned threadsPerBlock = 256;
//const unsigned blocks = (N+31)/32;
//const unsigned blocks = 64;
const unsigned threadsPerBlock = 32;
const unsigned blocks = N/threadsPerBlock;
printf ("info: launch 'vector_square' kernel\n");
vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
printf ("info: copy Device2Host\n");
CHECK ( cudaMemcpy(C_h, C_d, Nbytes, cudaMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
CHECK(cudaErrorUnknown);
}
}
printf ("PASSED!\n");
}
|
aa0bb38b85b8c903703da77c518a95461f2f6af5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* reference: http://home.ie.cuhk.edu.hk/~wkshum/papers/pagerank.pdf
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <chrono> // timing
#include <algorithm> // sort
/* global variables, this is where you would change the parameters */
const long long N = 10876; // number of nodes
const int num_iter = 10; // number of pagerank iterations
const std::string filename = "../tests/p2p-Gnutella04.txt";
const float d = 0.85f; // damping factor. 0.85 as defined by Google
const int blocksize = 512;
typedef float trans_m_col[N];
typedef int vis_m_col[N];
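// row typedefs for the N x N matrices so that matrix[row][col] indexing works on the flat allocations below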
void read_inputfile( vis_m_col *visited_matrix, int outgoing_table[ N ] )
{
// Read the edge-list file and build the visited matrix and out-degree table.
std::ifstream infile;
infile.open( filename );
if (infile.fail()) {
std::cerr << "Error opeing a file" << std::endl;
infile.close();
exit( 1 );
}
std::string line;
int a, b;
int count_edge = 0;
while ( getline( infile, line ) )
{
std::istringstream iss( line );
if ( ! ( iss >> a >> b ) ) { break; } // Format error.
// Process pair (a, b).
// std::cout << a << " " << b << std::endl;
visited_matrix[ a ][ b ] = 1;
outgoing_table[ a ] += 1;
count_edge++;
}
infile.close();
}
/**
* outgoing_table, transition_matrix, visited_matrix
*/
__global__
void update_entries( trans_m_col *transition_matrix, vis_m_col *visited_matrix, int *outgoing_table, int N )
{
int const idx = threadIdx.x + blockIdx.x * blockDim.x;
int const i = idx / N;
int const j = idx % N;
if (i < N && j < N)
{
if ( outgoing_table[ j ] == 0 )
{
// dangling node: 1 / N
transition_matrix[ i ][ j ] = 1.0f / N;
}
else if ( visited_matrix[ j ][ i ] == 1 )
{
// if v(j, i) is visited then a(ij) = 1/L(j)
transition_matrix[ i ][ j ] = 1.0f / outgoing_table[ j ];
}
// else{ table->ij_entries_matrix[ i ][ j ] = 0.0; }
}
}
__global__
void pagerank( float *score_table, float *old_score_table, trans_m_col *transition_matrix, float d, int N )
{
int const j = threadIdx.x + blockIdx.x * blockDim.x;
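// one thread per node j; the loop below walks row j of the transition matrix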
if (j < N)
{
/* update pagerank scores */
float sum = 0.0f;
for ( auto k = 0; k < N; ++k )
{
sum += old_score_table[ k ] * transition_matrix[ j ][ k ];
}
score_table[ j ] = d * old_score_table[ j ] + ( 1.0f - d ) * sum;
}
}
int comp( std::tuple< int, float > const &i, std::tuple< int, float > const &j )
{
return std::get< 1 >( i ) > std::get< 1 >( j );
}
void print_top_5( float arr[ N ] )
{
std::vector< std::tuple< int, float > > sorted = {};
for ( auto i = 0; i < N; ++i )
{
sorted.push_back( std::tuple< int, float >{ i, arr[ i ] } );
}
std::sort( sorted.begin(), sorted.end(), comp );
for ( auto i = 0; i < ::min( ( long long ) 5, N); ++i )
{
std::cout << std::get< 0 >( sorted[ i ] ) << "(" << std::get< 1 >( sorted[ i ] ) << ") ";
}
std::cout << std::endl;
}
void print_total( float arr[] )
{
float sum = 0.0f;
for ( auto i = 0; i < N; ++i )
{
sum += arr[ i ];
}
std::cout << "sum=" << sum << std::endl;
}
int main()
{
auto const total_start_time = std::chrono::steady_clock::now();
auto const score_t_size = N * sizeof(float);
auto const out_t_size = N * sizeof(int);
auto const vis_m_size = N * N * sizeof(int);
auto const trans_m_size = N * N * sizeof(float);
vis_m_col *visited_matrix;
visited_matrix = ( vis_m_col * )malloc( vis_m_size );
memset(visited_matrix, 0, vis_m_size);
trans_m_col *transition_matrix;
transition_matrix = ( trans_m_col * )malloc( trans_m_size );
memset(transition_matrix, 0, trans_m_size);
float score_table[ N ] = { 0 };
std::fill_n(score_table, N, 1.0f / N );
int outgoing_table[ N ] = { 0 };
read_inputfile( visited_matrix, outgoing_table );
float *dev_score_table, *dev_old_score_table;
int *dev_outgoing_table;
vis_m_col *dev_visited_matrix;
trans_m_col *dev_transition_matrix;
hipMalloc( &dev_score_table, score_t_size );
hipMalloc( &dev_old_score_table, score_t_size );
hipMalloc( &dev_outgoing_table, out_t_size );
hipMalloc( &dev_visited_matrix, vis_m_size );
hipMalloc( &dev_transition_matrix, trans_m_size );
hipError_t err = hipGetLastError(); // add
if (err != hipSuccess) std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl; // add
hipMemcpy( dev_score_table, score_table, score_t_size, hipMemcpyHostToDevice );
hipMemcpy( dev_outgoing_table, outgoing_table, out_t_size, hipMemcpyHostToDevice );
hipMemcpy( dev_visited_matrix, visited_matrix, vis_m_size, hipMemcpyHostToDevice );
hipMemcpy( dev_transition_matrix, transition_matrix, trans_m_size, hipMemcpyHostToDevice );
/* timing the PageRank algorithm */
auto const pr_start_time = std::chrono::steady_clock::now();
auto num_blocks = ceil( N * N / static_cast< float >( blocksize ) );
hipLaunchKernelGGL(( update_entries), dim3(num_blocks), dim3(blocksize) , 0, 0, dev_transition_matrix, dev_visited_matrix, dev_outgoing_table, N );
num_blocks = ceil( N / static_cast< float >( blocksize ) );
/* iterations must be serial */
for ( auto i = 0; i < num_iter - 1; ++i )
{
/* scores from previous iteration */
hipMemcpy( dev_old_score_table, dev_score_table, score_t_size, hipMemcpyDeviceToDevice );
hipLaunchKernelGGL(( pagerank), dim3(num_blocks), dim3(blocksize) , 0, 0, dev_score_table, dev_old_score_table, dev_transition_matrix, d, N );
}
/* retrieve final scores array from device and store back to host */
hipMemcpy(score_table, dev_score_table, score_t_size, hipMemcpyDeviceToHost);
auto const pr_end_time = std::chrono::steady_clock::now();
auto const pr_time = std::chrono::duration_cast< std::chrono::microseconds >\
( pr_end_time - pr_start_time ).count();
hipFree( dev_score_table );
hipFree( dev_old_score_table );
hipFree( dev_outgoing_table );
hipFree( dev_visited_matrix );
hipFree( dev_transition_matrix );
auto const total_end_time = std::chrono::steady_clock::now();
auto const total_time = std::chrono::duration_cast< std::chrono::microseconds >\
( total_end_time - total_start_time ).count();
print_top_5( score_table );
print_total( score_table );
std::cout << "in_kernel_pagerank_time = "
<< pr_time
<< " us"
<< "\nprogram_total_time = "
<< total_time
<< " us"
<< std::endl;
return 0;
}
| aa0bb38b85b8c903703da77c518a95461f2f6af5.cu | /*
* reference: http://home.ie.cuhk.edu.hk/~wkshum/papers/pagerank.pdf
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <chrono> // timing
#include <algorithm> // sort
/* global variables, this is where you would change the parameters */
const long long N = 10876; // number of nodes
const int num_iter = 10; // number of pagerank iterations
const std::string filename = "../tests/p2p-Gnutella04.txt";
const float d = 0.85f; // damping factor. 0.85 as defined by Google
const int blocksize = 512;
typedef float trans_m_col[N];
typedef int vis_m_col[N];
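// row typedefs for the N x N matrices so that matrix[row][col] indexing works on the flat allocations below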
void read_inputfile( vis_m_col *visited_matrix, int outgoing_table[ N ] )
{
// Read the edge-list file and build the visited matrix and out-degree table.
std::ifstream infile;
infile.open( filename );
if (infile.fail()) {
std::cerr << "Error opeing a file" << std::endl;
infile.close();
exit( 1 );
}
std::string line;
int a, b;
int count_edge = 0;
while ( getline( infile, line ) )
{
std::istringstream iss( line );
if ( ! ( iss >> a >> b ) ) { break; } // Format error.
// Process pair (a, b).
// std::cout << a << " " << b << std::endl;
visited_matrix[ a ][ b ] = 1;
outgoing_table[ a ] += 1;
count_edge++;
}
infile.close();
}
/**
* outgoing_table, transition_matrix, visited_matrix
*/
__global__
void update_entries( trans_m_col *transition_matrix, vis_m_col *visited_matrix, int *outgoing_table, int N )
{
int const idx = threadIdx.x + blockIdx.x * blockDim.x;
int const i = idx / N;
int const j = idx % N;
if (i < N && j < N)
{
if ( outgoing_table[ j ] == 0 )
{
// dangling node: 1 / N
transition_matrix[ i ][ j ] = 1.0f / N;
}
else if ( visited_matrix[ j ][ i ] == 1 )
{
// if v(j, i) is visited then a(ij) = 1/L(j)
transition_matrix[ i ][ j ] = 1.0f / outgoing_table[ j ];
}
// else{ table->ij_entries_matrix[ i ][ j ] = 0.0; }
}
}
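/*
 * One Jacobi-style PageRank iteration: thread j reads the previous
 * iteration's scores and row j of the transition matrix and computes
 *   score[j] = d * old_score[j] + (1 - d) * sum_k old_score[k] * A[j][k]
 * (this is the blending exactly as written in this program).  Each call
 * advances the scores by one iteration, so calls must stay serial.
 */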
__global__
void pagerank( float *score_table, float *old_score_table, trans_m_col *transition_matrix, float d, int N )
{
int const j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < N)
{
/* update pagerank scores */
float sum = 0.0f;
for ( auto k = 0; k < N; ++k )
{
sum += old_score_table[ k ] * transition_matrix[ j ][ k ];
}
score_table[ j ] = d * old_score_table[ j ] + ( 1.0f - d ) * sum;
}
}
int comp( std::tuple< int, float > const &i, std::tuple< int, float > const &j )
{
return std::get< 1 >( i ) > std::get< 1 >( j );
}
void print_top_5( float arr[ N ] )
{
std::vector< std::tuple< int, float > > sorted = {};
for ( auto i = 0; i < N; ++i )
{
sorted.push_back( std::tuple< int, float >{ i, arr[ i ] } );
}
std::sort( sorted.begin(), sorted.end(), comp );
for ( auto i = 0; i < std::min( ( long long ) 5, N); ++i )
{
std::cout << std::get< 0 >( sorted[ i ] ) << "(" << std::get< 1 >( sorted[ i ] ) << ") ";
}
std::cout << std::endl;
}
void print_total( float arr[] )
{
float sum = 0.0f;
for ( auto i = 0; i < N; ++i )
{
sum += arr[ i ];
}
std::cout << "sum=" << sum << std::endl;
}
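/*
 * Overall flow of main(): read the edge list into visited_matrix and
 * outgoing_table on the host, copy everything to the device, build the
 * transition matrix once with update_entries, run num_iter-1 pagerank
 * iterations (double-buffering the score arrays with a device-to-device
 * copy), then copy the final scores back and print the top-5 nodes,
 * their sum, and the timings.
 */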
int main()
{
auto const total_start_time = std::chrono::steady_clock::now();
auto const score_t_size = N * sizeof(float);
auto const out_t_size = N * sizeof(int);
auto const vis_m_size = N * N * sizeof(int);
auto const trans_m_size = N * N * sizeof(float);
vis_m_col *visited_matrix;
visited_matrix = ( vis_m_col * )malloc( vis_m_size );
memset(visited_matrix, 0, vis_m_size);
trans_m_col *transition_matrix;
transition_matrix = ( trans_m_col * )malloc( trans_m_size );
memset(transition_matrix, 0, trans_m_size);
float score_table[ N ] = { 0 };
std::fill_n(score_table, N, 1.0f / N );
int outgoing_table[ N ] = { 0 };
read_inputfile( visited_matrix, outgoing_table );
float *dev_score_table, *dev_old_score_table;
int *dev_outgoing_table;
vis_m_col *dev_visited_matrix;
trans_m_col *dev_transition_matrix;
cudaMalloc( &dev_score_table, score_t_size );
cudaMalloc( &dev_old_score_table, score_t_size );
cudaMalloc( &dev_outgoing_table, out_t_size );
cudaMalloc( &dev_visited_matrix, vis_m_size );
cudaMalloc( &dev_transition_matrix, trans_m_size );
cudaError_t err = cudaGetLastError(); // add
if (err != cudaSuccess) std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; // add
cudaMemcpy( dev_score_table, score_table, score_t_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_outgoing_table, outgoing_table, out_t_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_visited_matrix, visited_matrix, vis_m_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_transition_matrix, transition_matrix, trans_m_size, cudaMemcpyHostToDevice );
/* timing the PageRank algorithm */
auto const pr_start_time = std::chrono::steady_clock::now();
auto num_blocks = ceil( N * N / static_cast< float >( blocksize ) );
update_entries<<< num_blocks, blocksize >>>( dev_transition_matrix, dev_visited_matrix, dev_outgoing_table, N );
num_blocks = ceil( N / static_cast< float >( blocksize ) );
/* iterations must be serial */
for ( auto i = 0; i < num_iter - 1; ++i )
{
/* scores from previous iteration */
cudaMemcpy( dev_old_score_table, dev_score_table, score_t_size, cudaMemcpyDeviceToDevice );
pagerank<<< num_blocks, blocksize >>>( dev_score_table, dev_old_score_table, dev_transition_matrix, d, N );
}
/* retrieve final scores array from device and store back to host */
cudaMemcpy(score_table, dev_score_table, score_t_size, cudaMemcpyDeviceToHost);
auto const pr_end_time = std::chrono::steady_clock::now();
auto const pr_time = std::chrono::duration_cast< std::chrono::microseconds >\
( pr_end_time - pr_start_time ).count();
cudaFree( dev_score_table );
cudaFree( dev_old_score_table );
cudaFree( dev_outgoing_table );
cudaFree( dev_visited_matrix );
cudaFree( dev_transition_matrix );
auto const total_end_time = std::chrono::steady_clock::now();
auto const total_time = std::chrono::duration_cast< std::chrono::microseconds >\
( total_end_time - total_start_time ).count();
print_top_5( score_table );
print_total( score_table );
std::cout << "in_kernel_pagerank_time = "
<< pr_time
<< " us"
<< "\nprogram_total_time = "
<< total_time
<< " us"
<< std::endl;
return 0;
}
|
fc57e6ef1754f44fa9e1f70f1902ff5ea9a47e7b.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
DECLARE_double(epsilon);
DECLARE_bool(cta_np);
DEFINE_int32(first_iteration, 20, "Number of topology-driven iterations to run before switching to data-driven");
namespace hybrid_unopt_pr {
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelTopologyDriven__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelTopologyDrivenCTA__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
if (local_work.size > 0) {
rank_t update = res * ALPHA / local_work.size;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
}
}
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelDataDriven__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
}
}
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelDataDrivenCTA__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (local_work.size > 0) {
rank_t update = res * ALPHA / out_degree;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&work_target, &graph, &residual, &EPSILON](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
);
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource>
void Init__Single__(const WorkSource &workSource, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource>
void
RelaxTopologyDriven__Single__(const WorkSource &work_source, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernelTopologyDriven__Single__");
if (FLAGS_cta_np)
PageRankKernelTopologyDrivenCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
else
PageRankKernelTopologyDriven__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
RelaxDataDriven__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
if (FLAGS_cta_np)
PageRankKernelDataDrivenCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
else
PageRankKernelDataDriven__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool HybridDrivenPR() {
VLOG(0) << "HybridDrivenPR";
if (FLAGS_cta_np)
VLOG(0) << "CTA_NP Enabled";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<hybrid_unopt_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
hybrid_unopt_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Worklist *in_wl = &wl1, *out_wl = &wl2;
Stopwatch sw(true);
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
int iteration = 0;
for (iteration = 0; iteration < FLAGS_first_iteration; iteration++) {
solver.RelaxTopologyDriven__Single__(
groute::dev::WorkSourceRange<index_t>(dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
stream.Sync();
VLOG(1) << "Topology-Driven Iteration: " << iteration;
}
solver.RelaxDataDriven__Single__(
groute::dev::WorkSourceRange<index_t>(dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
*in_wl, stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
while (work_seg.GetSegmentSize() > 0) {
solver.RelaxDataDriven__Single__(groute::dev::WorkSourceArray<index_t>(work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "Data-Driven Iteration: " << iteration << " INPUT " << work_seg.GetSegmentSize() << " OUTPUT "
<< out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1)
<< boost::format("%s terminated after %d iterations (max: %d)") % hybrid_unopt_pr::Algo::Name() % iteration %
FLAGS_max_pr_iterations;
VLOG(0) << hybrid_unopt_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = hybrid_unopt_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
hybrid_unopt_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = hybrid_unopt_pr::Algo::Host(context.host_graph, residual, current_ranks);
return hybrid_unopt_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} | fc57e6ef1754f44fa9e1f70f1902ff5ea9a47e7b.cu | //
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
DECLARE_double(epsilon);
DECLARE_bool(cta_np);
DEFINE_int32(first_iteration, 20, "Number of topology-driven iterations to run before switching to data-driven");
namespace hybrid_unopt_pr {
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelTopologyDriven__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
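/*
 * CTA (cooperative thread array) variant of the topology-driven kernel:
 * each thread packs its node's work into an np_local record
 * {first edge, edge count, per-edge update} and hands it to groute's
 * CTAWorkScheduler, which redistributes the edge processing across the
 * whole thread block so that high-degree nodes do not serialize on a
 * single thread.
 */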
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelTopologyDrivenCTA__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
if (local_work.size > 0) {
rank_t update = res * ALPHA / local_work.size;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
}
}
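/*
 * Data-driven variants: instead of sweeping all nodes they take a
 * worklist as the work source.  Each processed node pushes
 * res * ALPHA / out_degree to its neighbours; when a neighbour's
 * accumulated residual crosses EPSILON (checked against the value
 * returned by atomicAdd), the neighbour is appended to the output
 * worklist so it is relaxed in the next round.  The CTA version below
 * again balances the per-edge work across the block.
 */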
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelDataDriven__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
}
}
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernelDataDrivenCTA__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (local_work.size > 0) {
rank_t update = res * ALPHA / out_degree;
local_work.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&work_target, &graph, &residual, &EPSILON](index_t edge, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append(dest);
}
}
);
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource>
void Init__Single__(const WorkSource &workSource, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource>
void
RelaxTopologyDriven__Single__(const WorkSource &work_source, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernelTopologyDriven__Single__");
if (FLAGS_cta_np)
PageRankKernelTopologyDrivenCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
else
PageRankKernelTopologyDriven__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
RelaxDataDriven__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
if (FLAGS_cta_np)
PageRankKernelDataDrivenCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
else
PageRankKernelDataDriven__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
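/*
 * Hybrid driver: after initialisation, FLAGS_first_iteration
 * topology-driven sweeps are run over all nodes; one data-driven
 * relaxation over the full node range then seeds the worklist, and the
 * algorithm iterates data-driven rounds, swapping the input/output
 * worklists, until the worklist is empty or FLAGS_max_pr_iterations is
 * exceeded.
 */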
bool HybridDrivenPR() {
VLOG(0) << "HybridDrivenPR";
if (FLAGS_cta_np)
VLOG(0) << "CTA_NP Enabled";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<hybrid_unopt_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
hybrid_unopt_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Worklist *in_wl = &wl1, *out_wl = &wl2;
Stopwatch sw(true);
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
int iteration = 0;
for (iteration = 0; iteration < FLAGS_first_iteration; iteration++) {
solver.RelaxTopologyDriven__Single__(
groute::dev::WorkSourceRange<index_t>(dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
stream.Sync();
VLOG(1) << "Topology-Driven Iteration: " << iteration;
}
solver.RelaxDataDriven__Single__(
groute::dev::WorkSourceRange<index_t>(dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
*in_wl, stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
while (work_seg.GetSegmentSize() > 0) {
solver.RelaxDataDriven__Single__(groute::dev::WorkSourceArray<index_t>(work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "Data-Driven Iteration: " << iteration << " INPUT " << work_seg.GetSegmentSize() << " OUTPUT "
<< out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1)
<< boost::format("%s terminated after %d iterations (max: %d)") % hybrid_unopt_pr::Algo::Name() % iteration %
FLAGS_max_pr_iterations;
VLOG(0) << hybrid_unopt_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = hybrid_unopt_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
hybrid_unopt_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = hybrid_unopt_pr::Algo::Host(context.host_graph, residual, current_ranks);
return hybrid_unopt_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} |
147ed466b6551471cc182c4a8f8f5ec1160e644d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define PI 3.14159265358
#define PI_4 0.78539816339 // PI/4
#define FD_STENCIL_1(D) \
{1.0/(D*D), -2.0/(D*D), 1.0/(D*D)}
#define FD_STENCIL_2(D) \
{-0.0833333/(D*D), 1.33333/(D*D), -2.5/(D*D), \
1.33333/(D*D), -0.0833333/(D*D)}
#define FD_STENCIL_3(D) \
{0.0111111/(D*D), -0.15/(D*D), 1.5/(D*D), \
-2.72222/(D*D), 1.5/(D*D), -0.15/(D*D), \
0.0111111/(D*D)}
#define FD_STENCIL_4(D) \
{-0.00178571/(D*D), 0.0253968/(D*D), -0.2/(D*D), \
1.6/(D*D), -2.84722/(D*D), 1.6/(D*D), \
-0.2/(D*D), 0.0253968/(D*D), -0.00178571/(D*D)}
#define FD_STENCIL_5(D) \
{0.00031746/(D*D), -0.00496032/(D*D), 0.0396825/(D*D), \
-0.238095/(D*D), 1.66667/(D*D), -2.92722/(D*D), \
1.66667/(D*D), -0.238095/(D*D), 0.0396825/(D*D), \
-0.00496032/(D*D), 0.00031746/(D*D)}
#define FD_STENCIL_6(D) \
{-6.01251e-05/(D*D), 0.00103896/(D*D), -0.00892857/(D*D), \
0.0529101/(D*D), -0.267857/(D*D), 1.71429/(D*D), \
-2.98278/(D*D), 1.71429/(D*D), -0.267857/(D*D), \
0.0529101/(D*D), -0.00892857/(D*D), 0.00103896/(D*D), \
-6.01251e-05/(D*D)}
#define FD_STENCIL_7(D) \
{1.18929e-05/(D*D), -0.000226625/(D*D), 0.00212121/(D*D), \
-0.0132576/(D*D), 0.0648148/(D*D), -0.291667/(D*D), \
1.75/(D*D), -3.02359/(D*D), 1.75/(D*D), \
-0.291667/(D*D), 0.0648148/(D*D), -0.0132576/(D*D), \
0.00212121/(D*D), -0.000226625/(D*D), 1.18929e-05/(D*D)}
#define FD_STENCIL2(N,D) FD_STENCIL_##N(D)
#define FD_STENCIL(N,D) FD_STENCIL2(N,D) // Unwraps N and D
#ifndef tFloat
# define tFloat float
#endif
__constant__ tFloat tStencil[] = FD_STENCIL(1 , dt);
__constant__ tFloat xStencil[] = FD_STENCIL(sr, dx);
// 0.9899*sqrt(8.0*log(10.0))/(PI*freq);
__constant__ tFloat hat_t0 = 1.3523661426929/freq;
__device__ tFloat hatWavelet(tFloat t);
__device__ tFloat hatWavelet(tFloat t){
const tFloat pift = PI*freq*(t - hat_t0);
const tFloat pift2 = pift*pift;
return (1.0 - 2.0*pift2)*exp(-pift2);
}
__global__ void fd2d(tFloat *u1,
const tFloat *u2,
const tFloat *u3,
const tFloat currentTime){
__shared__ tFloat Lu[By + 2*sr][Bx + 2*sr];
tFloat r_u2, r_u3;
const int bx = (blockIdx.x * Bx);
const int by = (blockIdx.y * By);
const int lx = threadIdx.x;
const int ly = threadIdx.y;
const int tx = bx + lx;
const int ty = by + ly;
const int id = ty*w + tx;
r_u2 = u2[id];
r_u3 = u3[id];
const int nX1 = (tx - sr + w) % w;
const int nY1 = (ty - sr + h) % h;
const int nX2 = (tx + Bx - sr + w) % w;
const int nY2 = (ty + By - sr + h) % h;
Lu[ly][lx] = u2[nY1*w + nX1];
if(lx < 2*sr){
Lu[ly][lx + Bx] = u2[nY1*w + nX2];
if(ly < 2*sr)
Lu[ly + By][lx + Bx] = u2[nY2*w + nX2];
}
if(ly < 2*sr)
Lu[ly + By][lx] = u2[nY2*w + nX1];
__syncthreads();
tFloat lap = 0.0;
for(int i = 0; i < (2*sr + 1); i++)
lap += xStencil[i]*Lu[ly + sr][lx + i] + xStencil[i]*Lu[ly + i][lx + sr];
const tFloat u_n1 = (-tStencil[1]*r_u2 - tStencil[2]*r_u3 + lap)/tStencil[0];
if((tx == mX) && (ty == mY))
u1[id] = u_n1 + hatWavelet(currentTime)/tStencil[0];
else
u1[id] = u_n1;
}
| 147ed466b6551471cc182c4a8f8f5ec1160e644d.cu | #define PI 3.14159265358
#define PI_4 0.78539816339 // PI/4
#define FD_STENCIL_1(D) \
{1.0/(D*D), -2.0/(D*D), 1.0/(D*D)}
#define FD_STENCIL_2(D) \
{-0.0833333/(D*D), 1.33333/(D*D), -2.5/(D*D), \
1.33333/(D*D), -0.0833333/(D*D)}
#define FD_STENCIL_3(D) \
{0.0111111/(D*D), -0.15/(D*D), 1.5/(D*D), \
-2.72222/(D*D), 1.5/(D*D), -0.15/(D*D), \
0.0111111/(D*D)}
#define FD_STENCIL_4(D) \
{-0.00178571/(D*D), 0.0253968/(D*D), -0.2/(D*D), \
1.6/(D*D), -2.84722/(D*D), 1.6/(D*D), \
-0.2/(D*D), 0.0253968/(D*D), -0.00178571/(D*D)}
#define FD_STENCIL_5(D) \
{0.00031746/(D*D), -0.00496032/(D*D), 0.0396825/(D*D), \
-0.238095/(D*D), 1.66667/(D*D), -2.92722/(D*D), \
1.66667/(D*D), -0.238095/(D*D), 0.0396825/(D*D), \
-0.00496032/(D*D), 0.00031746/(D*D)}
#define FD_STENCIL_6(D) \
{-6.01251e-05/(D*D), 0.00103896/(D*D), -0.00892857/(D*D), \
0.0529101/(D*D), -0.267857/(D*D), 1.71429/(D*D), \
-2.98278/(D*D), 1.71429/(D*D), -0.267857/(D*D), \
0.0529101/(D*D), -0.00892857/(D*D), 0.00103896/(D*D), \
-6.01251e-05/(D*D)}
#define FD_STENCIL_7(D) \
{1.18929e-05/(D*D), -0.000226625/(D*D), 0.00212121/(D*D), \
-0.0132576/(D*D), 0.0648148/(D*D), -0.291667/(D*D), \
1.75/(D*D), -3.02359/(D*D), 1.75/(D*D), \
-0.291667/(D*D), 0.0648148/(D*D), -0.0132576/(D*D), \
0.00212121/(D*D), -0.000226625/(D*D), 1.18929e-05/(D*D)}
#define FD_STENCIL2(N,D) FD_STENCIL_##N(D)
#define FD_STENCIL(N,D) FD_STENCIL2(N,D) // Unwraps N and D
#ifndef tFloat
# define tFloat float
#endif
__constant__ tFloat tStencil[] = FD_STENCIL(1 , dt);
__constant__ tFloat xStencil[] = FD_STENCIL(sr, dx);
// 0.9899*sqrt(8.0*log(10.0))/(PI*freq);
__constant__ tFloat hat_t0 = 1.3523661426929/freq;
__device__ tFloat hatWavelet(tFloat t);
__device__ tFloat hatWavelet(tFloat t){
const tFloat pift = PI*freq*(t - hat_t0);
const tFloat pift2 = pift*pift;
return (1.0 - 2.0*pift2)*exp(-pift2);
}
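/*
 * fd2d advances the 2-D scalar wave equation by one time step.
 * Each block loads a (Bx+2*sr) x (By+2*sr) tile of u2 into shared
 * memory, wrapping indices periodically at the domain edges, then
 * applies the (2*sr+1)-point finite-difference stencil along x and y
 * (the same xStencil is used for both axes, i.e. dx is assumed equal
 * to dy).  With tStencil = {1/dt^2, -2/dt^2, 1/dt^2} the update
 *   u1 = (-tStencil[1]*u2 - tStencil[2]*u3 + lap) / tStencil[0]
 * reduces to the familiar  u1 = 2*u2 - u3 + dt^2*lap,
 * and a Ricker ("Mexican hat") source wavelet is injected at (mX,mY).
 * The symbols w, h, freq, dx, dt, sr, Bx, By, mX and mY are not
 * declared in this file, so they are presumably supplied as
 * compile-time -D definitions.
 */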
__global__ void fd2d(tFloat *u1,
const tFloat *u2,
const tFloat *u3,
const tFloat currentTime){
__shared__ tFloat Lu[By + 2*sr][Bx + 2*sr];
tFloat r_u2, r_u3;
const int bx = (blockIdx.x * Bx);
const int by = (blockIdx.y * By);
const int lx = threadIdx.x;
const int ly = threadIdx.y;
const int tx = bx + lx;
const int ty = by + ly;
const int id = ty*w + tx;
r_u2 = u2[id];
r_u3 = u3[id];
const int nX1 = (tx - sr + w) % w;
const int nY1 = (ty - sr + h) % h;
const int nX2 = (tx + Bx - sr + w) % w;
const int nY2 = (ty + By - sr + h) % h;
Lu[ly][lx] = u2[nY1*w + nX1];
if(lx < 2*sr){
Lu[ly][lx + Bx] = u2[nY1*w + nX2];
if(ly < 2*sr)
Lu[ly + By][lx + Bx] = u2[nY2*w + nX2];
}
if(ly < 2*sr)
Lu[ly + By][lx] = u2[nY2*w + nX1];
__syncthreads();
tFloat lap = 0.0;
for(int i = 0; i < (2*sr + 1); i++)
lap += xStencil[i]*Lu[ly + sr][lx + i] + xStencil[i]*Lu[ly + i][lx + sr];
const tFloat u_n1 = (-tStencil[1]*r_u2 - tStencil[2]*r_u3 + lap)/tStencil[0];
if((tx == mX) && (ty == mY))
u1[id] = u_n1 + hatWavelet(currentTime)/tStencil[0];
else
u1[id] = u_n1;
}
|
b7be021a78874f33923b483003fd2b7b07a313e7.hip | // !!! This is a file automatically generated by hipify!!!
/**
 N-Queens solver in CUDA (backtracking + bitmaps)
 ([email protected])
 $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
   -c: CPU, non-recursive
   -r: CPU, recursive
   -g: GPU
   -s: SGPU (an alternative GPU implementation)
 (Later steps in this series add the BOUND1 and BOUND1,2 prunings and the
 "n27" pre-placement technique used for boards up to N=27.)
 With bitmaps, checking whether a square is attacked drops from O(N) to
 O(1): instead of flag arrays of length 2*N-1, each direction is a single
 bit vector and the next candidate square is taken with  -flags & flags.
===================
1. Representing the board with bitmaps
===================
Each row of the board is packed into a single bit pattern: the bit of
the column holding that row's queen is ON, every other bit is 0.
For example (N=8, row numbers on the right):
-----Q-- 00000100 0
---Q---- 00010000 1
------ Q- 00000010 2
Q------- 10000000 3
-------Q 00000001 4
-Q------ 01000000 5
---- Q--- 00001000 6
-- Q----- 00100000 7
===================
2. The attack masks: left, down, right
===================
Three masks record the squares attacked by the queens placed so far:
 1. the left-descending diagonal : left
 2. the column straight down     : down
 3. the right-descending diagonal: right
With a queen on 0x10 of some row (see the diagram below), the right
diagonal covers 0x08 one row down and 0x04 two rows down, while the
left diagonal covers 0x20 one row down and 0x40 two rows down:
the right diagonal shifts one bit to the right per row and the left
diagonal shifts one bit to the left per row.
*-------------
|. . . . . .
|. . . -3. . 0x02 -|
|. . -2. . . 0x04 |(1 bit right)
|. -1. . . . 0x08 -|
|Q . . . . . 0x10 (Q 4 down)
|. +1. . . . 0x20 -|
|. . +2. . . 0x40 |(1 bit left)
|. . . +3. . 0x80 -|
*-------------
When advancing from row n to row n+1, the bit chosen in row n is ORed
into each mask and the mask is shifted along its diagonal; the masks
handed to row n+1 are therefore:
left :(left |bit)<<1
right:(right|bit)>>1
down : down|bit
===================
3. Extracting placeable squares one bit at a time
===================
For row n+1 the three masks are ORed together; any bit that is OFF in
that OR is a square where a queen can still be placed, so
   bitmap = mask & ~(left | down | right)
has an ON bit for every placeable square.  The lowest ON bit is taken
with
bit=-bitmap & bitmap;// lowest ON bit of bitmap
00000011 3
00000010 2
00000001 1
00000000 0
11111111 -1
11111110 -2
11111101 -3
-n is the two's complement of n (all bits of n inverted, plus 1), so
n AND -n leaves only the lowest ON bit of n.
For example, with n=22 a single ON bit survives:
00010110 22
AND 11101010 -22
------------------
00000010
Repeat while bitmap still has ON bits:
while(bitmap) {
bit=-bitmap & bitmap;
bitmap ^= bit;
//place a queen on (bit) and search the next row
}
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
CPUR
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
CPU
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
SGPU
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
GPU
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
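/*
 * Illustrative sketch (not part of the original program): the helper
 * below, with a name of our choosing, only demonstrates the bit
 * manipulation described in the comment block above: building the
 * bitmap of free squares and peeling off one candidate at a time with
 * bit = -bitmap & bitmap.
 */
static int count_free_squares(unsigned int left,unsigned int down,unsigned int right,unsigned int mask)
{
  unsigned int bitmap=mask&~(left|down|right); //ON bits = placeable squares
  int cnt=0;
  while(bitmap){
    unsigned int bit=(-bitmap)&bitmap; //lowest ON bit
    bitmap^=bit;                       //clear it
    cnt++;                             //a real solver would recurse here with
                                       //(left|bit)<<1, down|bit, (right|bit)>>1
  }
  return cnt;
}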
//
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int cnt;
int x[MAX];
int y[MAX];
}Board ;
//
Board B;
Board b[2457600];
// GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long long solve_nqueen_cuda(int size,int steps);
void NQueenG(int size,int mask,int row,int steps);
// SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
// CPU
void TimeFormat(clock_t utime,char *form);
// CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
// CPUR
void NQueenR(int size,int mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right,int cnt);
//
//GPU
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//the kernel searches its own sub-tree starting at row 0; the rows fixed
//on the host are encoded in the down/left/right values handed over.
//mask has the lowest `size` bits set (for n=8, mask is binary 11111111).
register int row=0;
register unsigned int bit;
//
//
//
//thread index within the block
register unsigned const int tid=threadIdx.x;
//block index within the grid
register unsigned const int bid=blockIdx.x;
//global thread index
register unsigned const int idx=bid*blockDim.x+tid;
//
//per-thread search stacks kept in shared memory.
//the second dimension is 10 because the GPU part never searches more
//than 10 rows (size-mark<=10); THREAD_NUM threads per block share them.
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//compute the bitmap of placeable squares from down, left and right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//steps threads are launched per batch, but only the first totalCond
//of them correspond to partial boards produced on the CPU.
if(idx<totalCond){
//the values staged in totalDown,totalLeft,totalRight were loaded into
//the down,left,right stacks above; this thread continues the partial
//board with index idx, which the CPU had expanded down to row mark.
/**06 **********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//cache this row's bitmap/down/left/right in registers
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//if bitmap[tid][row]==00000000 there is nowhere left to put a queen in
//this row: backtrack one row.
/**06 **********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//take one candidate square (one ON bit) out of the bitmap
bitmap[tid][row]
^=bit
/**06 **********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//
if((bit&mask)!=0){
//?
//
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07register ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//store this thread's solution count in shared memory
sum[tid]=total;
}else{
//threads beyond totalCond have no board: contribute 0
sum[tid]=0;
}
//block-wide tree reduction of sum[]: halve the number of active
//threads each step, with __syncthreads() between the steps.
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU
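/*
 * Host-side driver for one first-row queen position: it runs the
 * backtracking on the CPU down to row `mark`, stores each partial board
 * (down/left/right) into the total* staging arrays, and whenever `steps`
 * boards have accumulated it ships the batch to cuda_kernel, which
 * finishes the remaining size-mark rows and returns one count per block
 * that is summed on the host.
 */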
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//depth at which the CPU hands over to the GPU (the GPU finishes at most 10 rows)
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmap stack for the host-side search: one entry per row,
//holding the free squares of that row
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bitGPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
hipHostMalloc((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
hipHostMalloc((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
hipHostMalloc((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
hipHostMalloc((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//rows up to row==mark are expanded on the CPU; each time row==mark is
//reached, the current down,left,right are stored into
//totalDown,totalLeft,totalRight as one work item for the GPU.
//the remaining rows of that board are then finished on the GPU.
//e.g. for n=15 the CPU goes down to row=5
//(the GPU part never handles more than 10 rows).
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//if bitmap[row]==00000000 there is no free square left in this row:
//backtrack one row
/***06 bit*********************/
//06GPU
if(bitmap[row]==0){ row--; }
/************************/
/***06 bit*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//
//06SGPU
/***06 bit*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //extract the lowest ON bit
if((bit&mask)!=0){//the chosen square is on the board
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 bit*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 bit*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//reached row==mark: record this partial board (its down,left,right)
//as one GPU work item; totalCond counts the work items queued so far
//and becomes the thread index on the GPU side.
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//
totalCond++;
//each GPU batch is launched with `steps` threads, so once totalCond
//reaches steps the batch is full and is shipped to the GPU.
if(totalCond==steps){
//matched==true means a previous batch is still in flight on the GPU:
//collect its per-block results before reusing the buffers.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//the kernel finishes the remaining size-mark rows of each of the totalCond boards
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//the kernel launch is asynchronous; remember that a batch is in
//flight so its results are collected later.
matched=true;
//start filling the next batch (at most steps boards).
totalCond=0;
}
//this partial board has been handed off to the staging arrays;
//backtrack and keep enumerating boards at row==mark.
row--;
}
}else{
//no placeable square: plain CPU backtracking,
//exactly as in the CPU-only nqueen solver.
row--;
}
}
}
//collect the results of the batch still in flight on the GPU, if any.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//launch one final batch for the remaining totalCond boards and add up
//the per-block results.
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
/***06 cudaFreeHost**/
//delete[] totalDown;
hipHostFree(totalDown);
//delete[] totalLeft;
hipHostFree(totalLeft);
//delete[] totalRight;
hipHostFree(totalRight);
//delete[] h_results;
hipHostFree(h_results);
/************************/
return total;
}
//GPU
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//put the first-row queen only in the left half of the board
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//mirror symmetry: every left-half solution has a right-half twin
TOTAL=TOTAL*2;
//odd N: the centre column is searched once, without doubling
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//
const int tid=threadIdx.x;//thread index within the block
const int bid=blockIdx.x;//block index within the grid
const int idx=bid*blockDim.x+tid;//global thread index
//
__shared__ unsigned int down[THREAD_NUM][10];//per-thread search stacks in shared memory
__shared__ unsigned int left[THREAD_NUM][10];//one entry per search depth for each of the
__shared__ unsigned int right[THREAD_NUM][10];//THREAD_NUM threads; depth is at most 10 on the GPU
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//the GPU search uses its own row counter starting at 0
unsigned int bit;
if(idx<totalCond){//only the first totalCond of the steps threads carry a board
down[tid][row]=totalDown[idx];//load the partial board staged by the CPU
left[tid][row]=totalLeft[idx];//this thread continues the board with index idx
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//OR of the masks: in this SGPU variant an ON bit means the square is blocked
while(row>=0){
//
//06GPU
//if(bitmap[tid][row]==0){//bitmap[tid][row]=00000000 1
//06SGPU
if((bitmap[tid][row]&mask)==mask){//every square in this row is blocked: backtrack
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//the chosen square is on the board
if(row+1==mark){//reached the last GPU row: count a solution
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//no placeable square left: backtrack
row--;
}
}
}
sum[tid]=total;//store this thread's solution count in shared memory
}else{//threads beyond totalCond have no board to work on
sum[tid]=0;
}
//block-wide tree reduction of sum[]: halve the number of active
//threads each step, with __syncthreads() between the steps.
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA initialisation: pick the first device with compute capability >= 1.x
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//
//hh:mm:ss.ms
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
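/*
 * symmetryOps_n27 decides whether the pre-placed board indexed by
 * (w,n,e,s) is the canonical representative of its symmetry class and,
 * if so, how many distinct solutions it stands for: 2 if the board is
 * invariant under a 90-degree rotation, 4 if only under 180 degrees,
 * 8 otherwise; 0 means the board is not canonical and is skipped.
 */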
int symmetryOps_n27(int w,int e,int n,int s,int size){
int lsize=(size-2)*(size-1)-w;
if(n<w || n>=lsize){
return 0;
}
if(e<w || e>=lsize){
return 0;
}
if(s<w || s>=lsize){
return 0;
}
//// Check for minimum if n, e, s = (N-2)*(N-1)-1-w
if(s==w){
if((n!=w)||(e!=w)){
// right rotation is smaller unless w = n = e = s
//not the canonical representative unless w=n=e=s: skip it
return 0;
}
//w=n=e=s: the board is invariant under 90-degree rotation,
//so it stands for 2 distinct solutions
return 2;
}
if((e==w)&&(n>=s)){
//e==w: candidate for 180-degree rotational symmetry
if(n>s){
//a 180-degree rotation of this board is smaller: skip it
return 0;
}
//invariant under 180 degrees only: stands for 4 distinct solutions
return 4;
}
return 8;
}
//
bool board_placement(int si,int x,int y)
{
//
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
//this queen is already on that square: nothing to do
return true;
}
B.x[x]=y;
//x y p.N-1-x+y x+y
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
//
// CPU: non-recursive backtracking with bitmaps
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
while((bv[row]&1)!=0) {//skip rows that already hold a pre-placed queen
n=row++;
bv[row]=bv[n]>>1;//shift right one bit
left[row]=left[n]<<1;//left: shift left one bit
right[row]=right[n]>>1;//right: shift right one bit
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
// CPUR: recursive backtracking with bitmaps
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right,int cnt){
uint64 bitmap=0;
uint64 bit=0;
// Skip the rows that already hold a pre-placed queen
while((bv&1)!=0) {
bv>>=1;//shift right one bit
left<<=1;//left: shift left one bit
right>>=1;//right: shift right one bit
row++;
}
bv>>=1;
if(row==size){
//TOTAL++;
UNIQUE++; //count one unique solution
TOTAL+=cnt; //add the number of boards this unique solution stands for
}else{
//bitmap=mask&~(left|down|right);//the mask stopped counts appearing for larger N, so it was removed
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1,cnt);
}
}
}
//
long prepare(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0; //index of the next free slot in b[]; must start at 0
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<idx;w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
int cnt=symmetryOps_n27(w,e,n,s,size);
if(cnt !=0){
B.cnt=cnt;
b[bcnt]=B;
bcnt++;
}
}
}
}
}
return bcnt;
}
//
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** Parse the command-line parameters */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** Output and run */
if(cpu){
printf("\n\nCPU \n");
}else if(cpur){
printf("\n\nCPUR \n");
}else if(gpu){
printf("\n\nGPU \n");
}else if(sgpu){
printf("\n\nSGPU \n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //for timing
char t[20]; //holds the formatted time hh:mm:ss.ms
int min=5;
int targetN=15;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
// Pre-placement: queens are put on the outer two rows and two columns
long bcnt=prepare(size);
// Start timing only after the pre-placement has finished
st=clock();
for (long bc=0;bc<bcnt;bc++){ //prepare() fills entries 0..bcnt-1 only
B=b[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5),B.cnt);
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
//
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); // start timing
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); // stop timing
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
| b7be021a78874f33923b483003fd2b7b07a313e7.cu | /**
Algorithms and Data Structures Learned with CUDA
Optimizing the N-Queens problem step by step
Iichiro Suzuki, Information Technology Bureau, Kyodo News ([email protected])

Compile and run:
$ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
 -c: CPU
 -r: CPU recursive
 -g: GPU
 -s: SGPU (apparently modeled on the Somers version)

  1. Brute force
  2.
  3. Backtracking (arrays)
  4.
  5.
  6. Backtracking (bitmaps)
  7. Mirror
  8. Symmetry removal
  9. Branching on the queen's position, BOUND1
 10. Branching on the queen's position, BOUND1,2
 11. Pruning
 12. Optimization
 13. Parallel processing
 8-3: n27 symmetry removal (pruning) + bitmaps (n27)
Queens are first placed in the outer two rows/columns on all four sides; the
remaining boards are then solved with bitmaps (n27) plus symmetry removal.
Pruning has not been added yet.

Symmetry removal
Every solution has up to eight symmetric counterparts: the board rotated by
90, 180 and 270 degrees plus the mirror images of those orientations.
Symmetric duplicates are removed and the total is derived from the unique
solutions. The symmetry removal used here was changed to the n27 variant.
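
As a worked illustration added here (well-known figures for the 8-queens
puzzle, not output of this file): N=8 has 92 solutions and 12 unique ones;
11 of the 12 have no symmetry of their own and stand for 8 boards each,
while 1 is invariant under a 180-degree rotation and stands for only 4,
giving 11*8 + 1*4 = 92.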
Bitmaps (n27)
Speed-up through bit operations: the board state is packed into bitmaps and
processed there, which is far faster than a simple backtracking search.
With bitmaps the data can be moved quickly by shifting: a flag-array
implementation needs O(N) time to move its data, a bitmap needs O(1).
Instead of preparing 2*N-1 elements per diagonal direction the way a flag
array does, N bits are enough: the placeable columns are kept in flags and
pulled out one at a time with -flags & flags.
This again is several times faster than the array-based backtracking.
===================
Idea 1
===================
The N x N chessboard is represented by one-dimensional bitfields, one row of
the board mapped onto one bitfield (the bit at the queen's position is ON).
Backtracking then proceeds downward from bitfield 0, turning exactly one bit
ON in each successive bitfield.
-----Q-- 00000100 bitfield of row 0
---Q---- 00010000 bitfield of row 1
------Q- 00000010 bitfield of row 2
Q------- 10000000 bitfield of row 3
-------Q 00000001 bitfield of row 4
-Q------ 01000000 bitfield of row 5
----Q--- 00001000 bitfield of row 6
--Q----- 00100000 bitfield of row 7
===================
Idea 2
===================
Next, three more bitfields are prepared for checking attacked squares:
1. attacks propagating toward the lower left : left
2. attacks propagating straight down : down
3. attacks propagating toward the lower right: right
Now consider the diagonal attacks.
In the figure below, one line away from the queen the up-right diagonal
attack falls on bit 3 (0x08), two lines away on bit 2 (0x04). These values
are obtained by shifting the queen's position in line 0, 0x10, one bit to
the right per line.
For the up-left diagonal the attack falls on bit 5 (0x20) one line away and
on bit 6 (0x40) two lines away, so this time a one-bit left shift per line
is enough.
That is, by keeping the right-shifted attacks in right and the left-shifted
attacks in left, the queen's diagonal attacks are obtained simply by
shifting right and left one bit at a time.
*-------------
|. . . . . .
|. . . -3. . 0x02 -|
|. . -2. . . 0x04  |(1-bit right shift: right)
|. -1. . . . 0x08 -|
|Q . . . . . 0x10 <-(the queen sits at 4: down)
|. +1. . . . 0x20 -|
|. . +2. . . 0x40  |(1-bit left shift: left)
|. . . +3. . 0x80 -|
*-------------
Figure: checking the diagonal attacks
When the search advances from bitfield n to bitfield n+1, each of the three
bitfields is ORed with the bit chosen in row n (bit); left is then shifted
one bit to the left, down is passed unchanged, and right is shifted one bit
to the right before being handed to the search of bitfield n+1.
left :(left |bit)<<1
right:(right|bit)>>1
down : down|bit
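
A small worked example added here (my own numbers, not from the original
text): for N=5, after a queen is placed in row 0 at bit pattern 00100, row 1
receives left=(00100)<<1=01000, down=00100 and right=(00100)>>1=00010, so
mask&~(left|down|right)=11111&~01110=10001 and only the two outermost
columns of row 1 remain placeable.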
===================
Idea 3
===================
When searching bitfield n+1, the three bitfields are ORed together; any bit
that is ON in the result is an attacked square and cannot take a queen.
This ORed bitfield is then inverted, i.e. converted into a bitfield in which
the bits of the placeable squares are ON. Calling this placeable bitfield
bitmap, the following operation is applied:
bit=-bitmap & bitmap;//extract the rightmost ON bit
To understand this expression you need to know how negative values are
represented in binary on a computer. Written out explicitly:
00000011 3
00000010 2
00000001 1
00000000 0
11111111 -1
11111110 -2
11111101 -3
Turning a positive value n into -n inverts its bits and then adds 1. So, for
example, ANDing n and -n with n=22 gives the result below: exactly one bit
survives, namely the lowest ON bit of n written in binary. What matters is
that this single-bit extraction is achieved by an extremely simple operation.
00010110 22
AND 11101010 -22
------------------
00000010
Now, writing a while loop like the one below makes the loop run exactly as
many times as there are ON bits in bitmap, so the placeable patterns are
generated one by one with no waste at all.
while(bitmap) {
bit=-bitmap & bitmap;
bitmap ^= bit;
//here the placeable patterns are generated one at a time (bit)
}
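
A minimal standalone sketch added here to exercise the same left/down/right
technique outside this file; the names solve() and solutions are mine and
nothing below is part of this program:

#include <stdio.h>
static long solutions=0;
static void solve(int mask,int left,int down,int right){
  if(down==mask){ solutions++; return; }   // every row already holds a queen
  int bitmap=mask&~(left|down|right);      // placeable squares in this row
  while(bitmap){
    int bit=-bitmap&bitmap;                // lowest ON bit
    bitmap^=bit;
    solve(mask,(left|bit)<<1,down|bit,(right|bit)>>1);
  }
}
int main(void){
  int n=8;
  solve((1<<n)-1,0,0,0);
  printf("N=%d: %ld\n",n,solutions);       // prints 92 for N=8
  return 0;
}

Compiled with any C or C++ compiler it prints 92 for N=8, matching the
8-queens row of the tables below.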
Execution results
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
CPUR, recursive, backtracking + bitmaps
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
CPU, non-recursive, backtracking + bitmaps
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
SGPU, non-recursive, backtracking + bitmaps
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
GPU, non-recursive, backtracking + bitmaps
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//倿°å®£èš
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int cnt;
int x[MAX];
int y[MAX];
}Board ;
//
Board B;
Board b[2457600];
//颿°å®£èš GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long long solve_nqueen_cuda(int size,int steps);
void NQueenG(int size,int mask,int row,int steps);
//颿°å®£èš SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
//颿°å®£èš CPU
void TimeFormat(clock_t utime,char *form);
//颿°å®£èš CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
//颿°å®£èš CPUR
void NQueenR(int size,int mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right,int cnt);
//
//GPU
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//row=0ãšãªã£ãŠãã1è¡ç®ãããã£ãŠããããã§ã¯ãªã
//maskè¡ç®ä»¥éããã¹ã¿ãŒã
//n=8 ãªã mask==2 ãªã®ã§ ããããã¹ã¿ãŒã
register int row=0;
register unsigned int bit;
//
//ã¹ã¬ãã
//
//ãããã¯å
ã®ã¹ã¬ããID
register unsigned const int tid=threadIdx.x;
//ã°ãªããå
ã®ãããã¯ID
register unsigned const int bid=blockIdx.x;
//å
šäœéããŠã®ID
register unsigned const int idx=bid*blockDim.x+tid;
//
//ã·ã§ã¢ãŒãã¡ã¢ãª
//
//sharedã¡ã¢ãªã䜿ã ãããã¯å
ã¹ã¬ããã§å
±æ
//10åºå®ãªã®ã¯çŸåšã®maskèšå®ã§
//GPUã§å®è¡ããã®ã¯æå€§10ã ãã
//THREAD_NUMã¯ãããã¯ãããã®ã¹ã¬ããæ°
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightããbitmapãåºã
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//äœåãªã¹ã¬ããã¯åãããªã
//GPUã¯stepsæ°èµ·åãããtotalCond以äžã¯ç©ºåããã
if(idx<totalCond){
//totalDown,totalLeft,totalRightã®æ
å ±ã
//down,left,rightã«è©°ãçŽã
//CPU ã§è©°ã蟌ãã t_ã¯stepsåããã
//ãããã¯å
ã§ã¯ãããã¯ãããã®ã¹ã¬ããæ°ã«éå®
//ãããã®ã§ idxã§ãã
//
/**06 ã¹ã«ã©ãŒå€æ°ã«çœ®ãæãã**********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//bitmap[tid][row]ãã¹ã«ã©ãŒå€æ°ã«çœ®ãæã
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//bitmap[tid][row]=00000000 ã¯ã€ãŒã³ã
//ã©ãã«ã眮ããªãã®ã§1è¡äžã«æ»ã
/**06 ã¹ã«ã©ãŒå€æ°ã«çœ®ãæãã**********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//ã¯ã€ãŒã³ã眮ã
bitmap[tid][row]
^=bit
/**06 ã¹ã«ã©ãŒå€æ°ã«çœ®ãæãã**********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//眮ãå Žæããããã©ãã
if((bit&mask)!=0){
//æçµè¡?æçµè¡ããïŒååã®è¡ãŸã§
//ç¡äºå°éããã å ç®ãã
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07ã¹ã«ã©ãŒå€æ°ã«çœ®ãæããŠregisterå¯Ÿå¿ ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//眮ãå Žæããªããã°ïŒåäžã«
row--;
}
}
}
//æåŸsum[tid]ã«å ç®ãã
sum[tid]=total;
}else{
//totalCondæªæºã¯ç©ºåãããã®ã§totalã¯å ç®ããªã
sum[tid]=0;
}
//__syncthreads()ã§ãããã¯å
ã®ã¹ã¬ããéã®åæ
//å
šãŠã®ã¹ã¬ããã__syncthreads()ã«èŸ¿ãçãã®ãåŸ
ã€
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//äœè¡ç®ããGPUã§è¡ãããããã®èšå®ã¯å€æŽå¯èœãèšå®å€ãå€ãããã»ã©GPUã§äžŠè¡ããŠåã
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmapãé
åã§æã€ããšã«ãã
//stackã䜿ããªãã§1è¡åã«æ»ãã
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bitåŠçãGPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
cudaMallocHost((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
cudaMallocHost((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
cudaMallocHost((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
cudaMallocHost((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//12è¡ç®ãŸã§ã¯3è¡ç®ãŸã§CPU->row==mark以äžã§ 3è¡ç®ãŸã§ã®
//down,left,rightæ
å ±ã totalDown,totalLeft,totalRight
//ã«æ ŒçŽ
//ãã->3è¡ç®ä»¥éãGPUãã«ãã¹ã¬ããã§å®è¡ãçµæãååŸ
//13è¡ç®ä»¥éã¯CPUã§å®è¡ããè¡æ°ãïŒåãã€å¢ããŠè¡ã
//äŸãã°n15ã ãšrow=5ãŸã§CPUã§å®è¡ãã
//ãã以éã¯GPU(çŸåšã®èšå®ã ãšGPUã§ã¯æå€§10è¡å®è¡ãã
//ããã«ãªã£ãŠãã)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000 ã¯ã€ãŒã³ã
//ã©ãã«ã眮ããªãã®ã§1è¡äžã«æ»ã
/***06 bitæäœå€æŽ*********************/
//06GPU ãã£ã¡ã®ã»ããåªç§
if(bitmap[row]==0){ row--; }
/************************/
/***06 bitæäœå€æŽã§ã³ã¡ã³ã*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//ãããå Žæãããã°é²ã
//06SGPU
/***06 bitæäœå€æŽã§ã³ã¡ã³ã*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU ãã£ã¡ã®ã»ããåªç§
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //ã¯ã€ãŒã³ã眮ã
if((bit&mask)!=0){//眮ãå Žæãããã°å
ã«é²ã
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 bitæäœå€æŽã§ã³ã¡ã³ã*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 bitæäœå€æŽ*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//3è¡ç®(mark)ã«ã¯ã€ãŒã³ãïŒåãã€çœ®ããŠãã£ãŠã
//down,left,rightæ
å ±ãæ ŒçŽã
//ãã®æ¬¡ã®è¡ãžã¯é²ãŸãªãããã®è¡ã§å¯èœãªå Žæã«ã¯ã€ãŒ
//ã³çœ®ãçµãã£ããGPU䞊åå®è¡
//totalCond ãthreadIdã«ãªã åã¹ã¬ããã« down,left,rightæ
å ±ãæž¡ã
//row=2(13è¡ç®ä»¥éã¯å¢ããŠãããäŸãã°n15ã ãšrow=5)ã®æ
å ±ã
//totalDown,totalLeft,totalRightã«æ ŒçŽãã
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//ã¹ã¬ããæ°ãã€ã³ã¯ãªã¡ã³ããã
totalCond++;
//æå€§GPUæ°ã«éããŠããŸã£ããäžæŠããã§GPUãå®è¡ãããstepsã¯GPUã®å
//æäžŠè¡çšŒåæ°ãå¶åŸ¡
//nã®æ°ãå°ãªããã¡ã¯totalCondãstepsãè¶
ããããšã¯ãªããnã®æ°ãå¢ã
//ãŠè¡ããšè¶
ããããã«ãªãã
//ããã§ã¯totalCond==stepsã®å Žåã ããã®äžãž
if(totalCond==steps){
//matched=trueã®æã«COUNT远å //GPUå
ã§ã«ãŠã³ãããŠããã®ã§ãGPUã
//ãåºããmatched=trueã«ãªã£ãŠã
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark ã¯äœè¡GPUãå®è¡ããã totalCondã¯ã¹ã¬ããæ°
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//stepsæ°ã®æ°ã ããã«ãã¹ã¬ããã§èµ·åããã®ã ããå®éã«èšç®ãè¡ãã
//ãã®ã¯totalCondã®æ°ã ãã§ãã以å€ã¯ç©ºåãã«ãªã
//GPUå
ã§ã«ãŠã³ãããŠããã®ã§ãGPUããåºããmatched=trueã«ãªã£ãŠã
matched=true;
//totalCond==stepsã«ãŒãã§GPUãå®è¡ãããã¹ã¬ããããŸã0ããéå§ã
//ã(ããã«ãããªãã©ãstepsæ°åã ãGPUãèµ·åã§ãã)
totalCond=0;
}
//totalDown,totalLeft,totalRightã«æ
å ±ãæ ŒçŽããã1è¡äžã«äžãã
//ãããç¹°ãè¿ãããšã«ãã row=2ã§å¯èœãªå Žæå
šãŠã«ã¯ã€ãŒã³ã眮ããŠ
//totalDown,totalLeft,totalRightã«æ
å ±ãæ ŒçŽãã
row--;
}
}else{
//眮ãå Žæããªããã°äžã«äžãããrow==markè¡ã«éãããŸã§ã¯CPUåŽã§æ®éã«
//nqueenããã
row--;
}
}
}
//matched=trueã®æã«COUNT远å //GPUå
ã§ã«ãŠã³ãããŠããã®ã§ãGPUããåºãã
//matched=trueã«ãªã£ãŠã
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark ã¯äœè¡GPUãå®è¡ããã totalCondã¯ã¹ã¬ããæ°
//stepsæ°ã®æ°ã ããã«ãã¹ã¬ããã§èµ·åããã®ã ããå®éã«èšç®ãè¡ãããã®ã¯
//totalCondã®æ°ã ãã§ãã以å€ã¯ç©ºåãã«ãªã
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
/***06 cudaFreeHostãžå€æŽ**/
//delete[] totalDown;
cudaFreeHost(totalDown);
//delete[] totalLeft;
cudaFreeHost(totalLeft);
//delete[] totalRight;
cudaFreeHost(totalRight);
//delete[] h_results;
cudaFreeHost(h_results);
/************************/
return total;
}
//GPU
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//å¶æ°ã奿°å
±é å³åŽååã ãã¯ã€ãŒã³ã眮ã
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//ãã©ãŒãªã®ã§TOTALãïŒåãã
TOTAL=TOTAL*2;
//奿°ã®å Žåã¯ããã«äžå€®ã«ã¯ã€ãŒã³ã眮ã
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//ã¹ã¬ãã
const int tid=threadIdx.x;//ãããã¯å
ã®ã¹ã¬ããID
const int bid=blockIdx.x;//ã°ãªããå
ã®ãããã¯ID
const int idx=bid*blockDim.x+tid;//å
šäœéããŠã®ID
//ã·ã§ã¢ãŒãã¡ã¢ãª
__shared__ unsigned int down[THREAD_NUM][10];//sharedã¡ã¢ãªã䜿ã ãããã¯å
ã¹ã¬ããã§å
±æ
__shared__ unsigned int left[THREAD_NUM][10];//THREAD_NUMã¯ãããã¯ãããã®ã¹ã¬ããæ°
__shared__ unsigned int right[THREAD_NUM][10];//10ã§åºå®ãªã®ã¯çŸåšã®maskã®èšå®ã§GPUã§å®è¡ããã®ã¯æå€§10ã ãã
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//row=0ãšãªã£ãŠãã1è¡ç®ãããã£ãŠããããã§ã¯ãªãmaskè¡ç®ä»¥éããã¹ã¿ãŒã n=8 ãªã mask==2 ãªã®ã§ ããããã¹ã¿ãŒã
unsigned int bit;
if(idx<totalCond){//äœåãªã¹ã¬ããã¯åãããªã GPUã¯stepsæ°èµ·åãããtotalCond以äžã¯ç©ºåããã
down[tid][row]=totalDown[idx];//totalDown,totalLeft,totalRightã®æ
å ±ãdown,left,rightã«è©°ãçŽã
left[tid][row]=totalLeft[idx];//CPU ã§è©°ã蟌ãã t_ã¯stepsåããããããã¯å
ã§ã¯ãããã¯ãããã®ã¹ã¬ããããã«éå®ãããã®ã§ idxã§ãã
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//down,left,rightããbitmapãåºã
while(row>=0){
//
//06ã®GPU
//if(bitmap[tid][row]==0){//bitmap[tid][row]=00000000 ã¯ã€ãŒã³ãã©ãã«ã眮ããªãã®ã§1è¡äžã«æ»ã
//06ã®SGPU
if((bitmap[tid][row]&mask)==mask){//bitmap[tid][row]=00000000 ã¯ã€ãŒã³ãã©ãã«ã眮ããªãã®ã§1è¡äžã«æ»ã
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //ã¯ã€ãŒã³ã眮ã
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//眮ãå Žæããããã©ãã
if(row+1==mark){//æçµè¡?æçµè¡ããïŒååã®è¡ãŸã§ç¡äºå°éããã å ç®ãã
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//眮ãå Žæããªããã°ïŒåäžã«
row--;
}
}
}
sum[tid]=total;//æåŸsum[tid]ã«å ç®ãã
}else{//totalCondæªæºã¯ç©ºåãããã®ã§åœç¶ totalã¯å ç®ããªã
sum[tid]=0;
}
//__syncthreads()ã§ããããã¯å
ã®ã¹ã¬ããéã®åæããšããŸãã
//åæãåããšããããšã¯ãå
šãŠã®ã¹ã¬ããã__syncthreads()ã«èŸ¿ãçãã®ãåŸ
ã€
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}//__syncthreads();ã¯è€æ°åå¿
èŠ1åã ãèšè¿°ãããæ°ãéã£ã
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA åæå
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//
//hh:mm:ss.ms圢åŒã«åŠçæéãåºå
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
int symmetryOps_n27(int w,int e,int n,int s,int size){
int lsize=(size-2)*(size-1)-w;
if(n<w || n>=lsize){
return 0;
}
if(e<w || e>=lsize){
return 0;
}
if(s<w || s>=lsize){
return 0;
}
//// Check for minimum if n, e, s = (N-2)*(N-1)-1-w
if(s==w){
if((n!=w)||(e!=w)){
// right rotation is smaller unless w = n = e = s
//å³å転ã§åãå Žåw=n=e=sã§ãªããã°å€ãå°ããã®ã§skip
return 0;
}
//w=n=e=sã§ããã°90床å転ã§åãå¯èœæ§
//ãã®å Žåã¯ãã©ãŒã®2
return 2;
}
if((e==w)&&(n>=s)){
//e==wã¯180床å転ããŠåã
if(n>s){
//180床å転ããŠåãæn>=sã®æã¯smaller?
return 0;
}
//ãã®å Žåã¯4
return 4;
}
return 8;
}
//
bool board_placement(int si,int x,int y)
{
//åãå Žæã«çœ®ãããã§ãã¯
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
////åãå Žæã«çœ®ãã®ã¯OK
return true;
}
B.x[x]=y;
//xã¯è¡ yã¯å p.N-1-x+yã¯å³äžããå·Šäž x+yã¯å·Šäžããå³äž
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
//
//CPU éååž°ç ããžãã¯ã¡ãœãã
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
while((bv[row]&1)!=0) {
n=row++;
bv[row]=bv[n]>>1;//å³ã«ïŒãããã·ãã
left[row]=left[n]<<1;//left å·Šã«ïŒãããã·ãã
right[row]=right[n]>>1;//right å³ã«ïŒãããã·ãã
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
//
//CPUR ååž°ç ããžãã¯ã¡ãœãã
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right,int cnt){
uint64 bitmap=0;
uint64 bit=0;
//æ¢ã«ã¯ã€ãŒã³ã眮ããŠããè¡ã¯ã¹ããããã
while((bv&1)!=0) {
bv>>=1;//å³ã«ïŒãããã·ãã
left<<=1;//left å·Šã«ïŒãããã·ãã
right>>=1;//right å³ã«ïŒãããã·ãã
row++;
}
bv>>=1;
if(row==size){
//TOTAL++;
UNIQUE++; //ãŠããŒã¯è§£ãå ç®
TOTAL+=cnt; //察称解é€ã§åŸãããè§£æ°ãå ç®
}else{
//bitmap=mask&~(left|down|right);//maskã€ãããš10æ¡ç®ä»¥éæ°ãåºãªããªãã®ã§å€ãã
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1,cnt);
}
}
}
//
long prepare(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0; //index of the next free slot in b[]; must start at 0
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<idx;w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
int cnt=symmetryOps_n27(w,e,n,s,size);
if(cnt !=0){
B.cnt=cnt;
b[bcnt]=B;
bcnt++;
}
}
}
}
}
return bcnt;
}
//ã¡ã€ã³ã¡ãœãã
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** Parse the command-line parameters */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** Output and run */
if(cpu){
printf("\n\nCPU non-recursive backtracking + bitmaps\n");
}else if(cpur){
printf("\n\nCPUR recursive backtracking + bitmaps\n");
}else if(gpu){
printf("\n\nGPU non-recursive backtracking + bitmaps\n");
}else if(sgpu){
printf("\n\nSGPU non-recursive backtracking + bitmaps\n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //éåºŠèšæž¬çš
char t[20]; //hh:mm:ss.msãæ ŒçŽ
int min=5;
int targetN=15;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
//äºåæºå äžäžå·Šå³2è¡2åã«ã¯ã€ãŒã³ãé
眮ãã
long bcnt=prepare(size);
//äºåæºåãçµãã£ãŠããæéãèšæž¬ãã
st=clock();
for (long bc=0;bc<bcnt;bc++){ //prepare() fills entries 0..bcnt-1 only
B=b[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5),B.cnt);
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
//
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); // èšæž¬éå§
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); // èšæž¬çµäº
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
a78edefbeb16cdb5043fc8379b3b8c63d2d78eab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_ristretto_layer.hpp"
namespace caffe {
__global__ void sync_conv_ristretto_groups() { }
template <typename Dtype>
void CuDNNConvolutionRistrettoLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
//const Dtype* weight = this->weights_quantized_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
//const Dtype* bias_data = this->blobs_[1]->gpu_data();
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_ristretto_groups), dim3(1), dim3(1), 0, 0, );
if (this->phase_ == TEST) {
this->QuantizeLayerOutputs_gpu(top_data, top[i]->count());
}
}
}
template <typename Dtype>
void CuDNNConvolutionRistrettoLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_ristretto_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionRistrettoLayer);
} // namespace caffe
#endif
| a78edefbeb16cdb5043fc8379b3b8c63d2d78eab.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_ristretto_layer.hpp"
namespace caffe {
__global__ void sync_conv_ristretto_groups() { }
template <typename Dtype>
void CuDNNConvolutionRistrettoLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
//const Dtype* weight = this->weights_quantized_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
//const Dtype* bias_data = this->blobs_[1]->gpu_data();
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_ristretto_groups<<<1, 1>>>();
if (this->phase_ == TEST) {
this->QuantizeLayerOutputs_gpu(top_data, top[i]->count());
}
}
}
template <typename Dtype>
void CuDNNConvolutionRistrettoLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_ristretto_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionRistrettoLayer);
} // namespace caffe
#endif
|